source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__islt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__islt_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__islt_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__islt_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint8) // A*D function (colscale): GB (_AxD__islt_uint8) // D*A function (rowscale): GB (_DxB__islt_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__islt_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__islt_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint8) // C=scalar+B GB (_bind1st__islt_uint8) // C=scalar+B' GB (_bind1st_tran__islt_uint8) // C=A+scalar GB (_bind2nd__islt_uint8) // C=A'+scalar GB (_bind2nd_tran__islt_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__islt_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__islt_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
modifier_view.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2018, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: David Weese <david.weese@fu-berlin.de> // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h. // TODO(holtgrew): Move out convert() #ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_ #define SEQAN_MODIFIER_MODIFIER_VIEW_H_ namespace seqan { // ========================================================================== // Forwards // ========================================================================== // ========================================================================== // Classes // ========================================================================== // -------------------------------------------------------------------------- // Class ModView // -------------------------------------------------------------------------- /*! * @class ModViewModifiedIterator * @extends ModifiedIterator * @headerfile <seqan/modifier.h> * * @brief Transforms the character of a host using a custom functor. * * @signature template <typename THost, typename TFunctor> * class ModifiedIterator<THost, ModView<TFunctor> >; * * @tparam THost The host iterator. * @tparam TFunctor A unary functor type. */ /*! * @class ModViewModifiedString * @extends ModifiedString * @headerfile <seqan/modifier.h> * * @brief Transforms the character of a host using a custom functor. * * @signature template <typename THost, typename TFunctor> * class ModifiedString<THost, ModView<TFunctor> >; * * @tparam THost The host iterator. * @tparam TFunctor A unary functor type. 
*/ template <typename TFunctor> struct ModView {}; template <typename TFunctor> struct ModViewCargo { TFunctor func; ModViewCargo() : func() {} }; template <typename THost, typename TFunctor> class ModifiedIterator<THost, ModView<TFunctor> > { public: typedef typename Cargo<ModifiedIterator>::Type TCargo_; THost _host; TCargo_ _cargo; mutable typename Value<ModifiedIterator>::Type tmp_value; ModifiedIterator() : _host(), tmp_value() {} template <typename TOtherHost> ModifiedIterator(ModifiedIterator<TOtherHost, ModView<TFunctor> > & origin) : _host(origin._host), _cargo(origin._cargo), tmp_value() {} explicit ModifiedIterator(THost const & host) : _host(host), tmp_value() {} ModifiedIterator(THost const & host, TFunctor const & functor): _host(host), tmp_value() { cargo(*this).func = functor; } }; // -------------------------------------------------------------------------- // Class ModifiedString // -------------------------------------------------------------------------- template <typename THost, typename TFunctor> class ModifiedString<THost, ModView<TFunctor> > { public: typedef typename Pointer_<THost>::Type THostPointer_; typedef typename Cargo<ModifiedString>::Type TCargo_; mutable THostPointer_ _host; TCargo_ _cargo; mutable typename Value<ModifiedString>::Type tmp_value; // Default constructor. ModifiedString() : _host(), tmp_value() {} // Construct with the actual host. explicit ModifiedString(typename Parameter_<THost>::Type host): _host(_toPointer(host)), tmp_value() {} // Construct with the functor. explicit ModifiedString(TFunctor const & functor): _host(), tmp_value() { cargo(*this).func = functor; } // Constructor for creating a ModifiedString with const host from a non-const host. template <typename THost_> explicit ModifiedString(THost_ & host, SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) : _host(_toPointer(host)), tmp_value() { ignoreUnusedVariableWarning(dummy); } // Construct with the actual host; variant with functor. 
ModifiedString(typename Parameter_<THost>::Type host, TFunctor const & functor) : _host(_toPointer(host)), tmp_value() { cargo(*this).func = functor; } // Constructor for creating a ModifiedString with const host with a non-const host; variant with functor. template <typename THost_> explicit ModifiedString(THost_ & host, TFunctor const & functor, SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) : _host(_toPointer(host)), tmp_value() { ignoreUnusedVariableWarning(dummy); cargo(*this).func = functor; } // Constructor for innermost type; hand down to _host which is a ModifiedString itself. template <typename THost_> explicit ModifiedString(THost_ && host, SEQAN_CTOR_ENABLE_IF(IsAnInnerHost< typename RemoveReference<THost>::Type, typename RemoveReference<THost_>::Type >)) : _host(std::forward<THost_>(host)), tmp_value() { ignoreUnusedVariableWarning(dummy); } // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Variant with functor. template <typename THost_> explicit ModifiedString(THost_ && host, TFunctor const & functor, SEQAN_CTOR_ENABLE_IF(IsAnInnerHost< typename RemoveReference<THost>::Type, typename RemoveReference<THost_>::Type >)) : _host(std::forward<THost_>(host)), tmp_value() { ignoreUnusedVariableWarning(dummy); cargo(*this).func = functor; } template <typename TPos> inline typename Reference<ModifiedString>::Type operator[](TPos pos) { return value(*this, pos); } template <typename TPos> inline typename Reference<ModifiedString const>::Type operator[](TPos pos) const { return value(*this, pos); } }; // ========================================================================== // Metafunctions // ========================================================================== // -------------------------------------------------------------------------- // Metafunction Cargo [ModifiedIterator] // -------------------------------------------------------------------------- template <typename THost, typename TFunctor> struct 
Cargo<ModifiedIterator<THost, ModView<TFunctor> > > { typedef ModViewCargo<TFunctor> Type; }; // -------------------------------------------------------------------------- // Metafunction Value [ModifiedIterator] // -------------------------------------------------------------------------- template <typename THost, typename TFunctor> struct Value<ModifiedIterator<THost, ModView<TFunctor> > > { typedef typename TFunctor::result_type TResult_; typedef typename RemoveConst_<TResult_>::Type Type; }; template <typename THost, typename TFunctor> struct Value<ModifiedIterator<THost, ModView<TFunctor> > const> : Value<ModifiedIterator<THost, ModView<TFunctor> > > {}; // -------------------------------------------------------------------------- // Metafunction GetValue [ModifiedIterator] // -------------------------------------------------------------------------- template <typename THost, typename TFunctor> struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > : Value<ModifiedIterator<THost, ModView<TFunctor> > > {}; template <typename THost, typename TFunctor> struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > const> : Value<ModifiedIterator<THost, ModView<TFunctor> > > {}; // -------------------------------------------------------------------------- // Metafunction Reference [ModifiedIterator] // -------------------------------------------------------------------------- template <typename THost, typename TFunctor> struct Reference<ModifiedIterator<THost, ModView<TFunctor> > > : Value<ModifiedIterator<THost, ModView<TFunctor> > > {}; template <typename THost, typename TFunctor> struct Reference<ModifiedIterator<THost, ModView<TFunctor> > const> : Value<ModifiedIterator<THost, ModView<TFunctor> > > {}; // NOTE(h-2): ModView element access is always by copy never by reference // This is a workaround for dangling references to the stack when // combining infixes and modified views, more precisely: // if you iterate over an infix of a modview then value() on 
// the iterator will return reference to the tmp_value inside the moditerator
// which might have been destructed.
// This is a more general problem that stems from the fact that
// "virtual strings" of the same type (infixes, modstrings) can be
// automatically compacted into one layer, but combinations cannot.
// This workaround happens in ModView, because it is used less frequently
// than Infixes.

// --------------------------------------------------------------------------
// Metafunction Cargo                                        [ModifiedString]
// --------------------------------------------------------------------------

// The cargo of a ModView-modified string is the functor wrapper itself.
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor> Type;
};

// ==========================================================================
// Functions
// ==========================================================================

// --------------------------------------------------------------------------
// Function getValue()                                     [ModifiedIterator]
// --------------------------------------------------------------------------

// Dereference the host iterator and pass the character through the stored
// functor.  The result is returned by value, never by reference.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return cargo(me).func(*host(me));
}

// Const overload; same semantics as above.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return cargo(me).func(*host(me));
}

// --------------------------------------------------------------------------
// Function value()                                        [ModifiedIterator]
// --------------------------------------------------------------------------

// value() deliberately forwards to getValue(): elements of a ModView are
// accessed by copy, not by reference (avoids dangling references to the
// iterator's internal temporary).
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return getValue(me);
}

// Const overload; same semantics as above.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return getValue(me);
}

// --------------------------------------------------------------------------
// Function getValue()                                       [ModifiedString]
// --------------------------------------------------------------------------

// Element access: fetch the host character at pos and transform it with the
// functor stored in the cargo.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}

// Const overload; same semantics as above.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}

// --------------------------------------------------------------------------
// Function value()                                          [ModifiedString]
// --------------------------------------------------------------------------

// As for the iterator: value() returns a transformed copy, not a reference.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return getValue(me, pos);
}

// Const overload; same semantics as above.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return getValue(me, pos);
}

// --------------------------------------------------------------------------
// Function assignModViewFunctor()
// --------------------------------------------------------------------------

// Replace the functor stored in the modified string's cargo.
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & functor)
{
    cargo(me).func = functor;
}
// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------

// Apply the functor F to every element of sequence, in place.
// With OpenMP and SEQAN_PARALLEL enabled, sequences longer than 1000000
// elements are processed in parallel over index positions.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;

    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

// Overload taking the sequence by const reference.
// NOTE(review): this still assigns through the const sequence; presumably
// TSequence is a proxy/modified-string type whose element access yields
// assignable proxies -- confirm against callers before changing.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;

    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence const, Standard>::Type TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

}  // namespace seqan

#endif  // SEQAN_MODIFIER_MODIFIER_VIEW_H_
cpotrf.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpotrf.c, normal z -> c, Fri Sep 28 17:38:02 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_potrf * * Performs the Cholesky factorization of a Hermitian positive definite * matrix A. The factorization has the form * * \f[ A = L \times L^H, \f] * or * \f[ A = U^H \times U, \f] * * where U is an upper triangular matrix and L is a lower triangular matrix. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in,out] pA * On entry, the Hermitian positive definite matrix A. * If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading N-by-N lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * On exit, if return value = 0, the factor U or L from the Cholesky * factorization A = U^H*U or A = L*L^H. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * @retval > 0 if i, the leading minor of order i of A is not * positive definite, so the factorization could not * be completed, and the solution has not been computed. * ******************************************************************************* * * @sa plasma_omp_cpotrf * @sa plasma_cpotrf * @sa plasma_dpotrf * @sa plasma_spotrf * ******************************************************************************/ int plasma_cpotrf(plasma_enum_t uplo, int n, plasma_complex32_t *pA, int lda) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -4; } // quick return if (imax(n, 0) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_potrf(plasma, PlasmaComplexFloat, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrix. plasma_desc_t A; int retval; retval = plasma_desc_triangular_create(PlasmaComplexFloat, uplo, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_ctr2desc(pA, lda, A, &sequence, &request); // Call the tile async function. 
plasma_omp_cpotrf(uplo, A, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2tr(A, pA, lda, &sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&A); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_potrf * * Performs the Cholesky factorization of a Hermitian positive definite * matrix. * Non-blocking tile version of plasma_cpotrf(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * On entry, the Hermitian positive definite matrix A. * If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading N-by-N lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * On exit, if return value = 0, the factor U or L from the Cholesky * factorization A = U^H*U or A = L*L^H. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. 
 *          The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cpotrf
 * @sa plasma_omp_cpotrf
 * @sa plasma_omp_cpotrf
 * @sa plasma_omp_dpotrf
 * @sa plasma_omp_spotrf
 *
 ******************************************************************************/
void plasma_omp_cpotrf(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        // Errors are reported through the sequence/request pair, not return codes.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): sequence is NULL here, so this call relies on
        // plasma_request_fail() tolerating a NULL sequence — confirm.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0)
        return;

    // Call the parallel function.
    plasma_pcpotrf(uplo, A, sequence, request);
}
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to
%  create a 640x480 image from unsigned red-green-blue character data, use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types are
%      expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose
%      from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
%      LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Scan the channel map to configure alpha/colorspace before importing
    the pixel data.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        /* an alpha or opacity channel is present */
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /* a single-character map (e.g. "R") is treated as grayscale */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  Stream handler used by PingImage(): discards the pixel data and simply
  reports the number of columns consumed, so only properties are parsed.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);

  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Read through a pixel-discarding stream with the ping flag set so the
    coder skips decoding pixel data.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImage method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /*
    Expand any %d-style scene template in the filename; if expansion changed
    the name, the caller asked for an explicit scene range.
  */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /* ping each scene individually; unreadable scenes are skipped */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImage() reads an image or image sequence from a file or file handle.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Read the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return MagickTrue when the security policy authorizes the named coder for
  the requested rights; otherwise raise a PolicyError and set errno=EPERM.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse)
    {
      errno=EPERM;
      (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
        "NotAuthorized","`%s'",coder);
      return(MagickFalse);
    }
  return(MagickTrue);
}

MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* detect host byte order for raw formats with no explicit endian */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        The coder needs random access; spool a non-seekable blob to a
        temporary file first.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* no decoder for the detected format: retry detection unless a
         delegate can handle it */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(GetMagickModuleName(magick_info),
        ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      /* the delegate wrote a converted file; re-detect and decode it */
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(GetMagickModuleName(magick_info),
        ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* remove the spool file created for non-seekable input */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /* honor a scene selection such as image.gif[1-5] */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: normalize metadata, apply EXIF/TIFF hints,
    and honor read-time options (caption, extract, delay, dispose, ...).
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];

    const char
      *option;

    const StringInfo
      *profile;

    ssize_t
      option_type;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if (*magick_path == '\0' && *next->magick == '\0')
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* fold EXIF/TIFF orientation into the image and drop the properties */
    value=GetImageProperty(next,"exif:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    /* EXIF resolutions are rationals ("rho/sigma"); a comma form means
       "rho,sigma" with sigma in thousandths */
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        /* -extract: crop when offsets given, otherwise resize */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): these profile lookups are assigned but never used
       afterwards — presumably legacy validation hooks; confirm intent */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:modify",timestamp,exception);
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:create",timestamp,exception);
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        /* '>' clamps the delay down, '<' raises it; a bare value sets it.
           NOTE(review): the '<' branch updates ticks_per_second, not delay —
           looks asymmetric; confirm against upstream intent. */
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+
                  0.5);
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImages() reads one or more images and returns them as an image list.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any %d-style scene template; a changed name means an explicit
    scene range was requested.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* read each scene individually; unreadable scenes are skipped */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  /* image must be initialized before any ThrowReaderException below */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      /* empty or invalid Base64 payload */
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file is on disk, the name is defined by the filename member
%  of the image structure.
WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  /* Work on a clone so the caller's image_info is never mutated. */
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* Save the original filename; image->filename is restored from it on the
     delegate paths below. */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Probe host byte order: writes native endianness for raw
               formats when the caller did not specify one. */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      /* Bi-modal path: a single untainted image whose on-disk source is
         still accessible may be converted directly by a delegate. */
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /* The encoder needs a seekable stream; if the target is not seekable,
         write to a temporary file and copy it to the real target at the
         end of this function (see the `temporary` block below). */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,
        MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      /* Non-thread-safe coders are serialized on the coder's semaphore. */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(GetMagickModuleName(magick_info),
        WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,
        exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /* No encoder and no delegate for the requested magick: fall back
             in order to (1) the image's own magick, (2) the filename
             extension, (3) the image's magick again, before giving up. */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(GetMagickModuleName(magick_info),
                WritePolicyRights,exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.
While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.   WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).  When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* An explicit filename overrides the per-frame filenames on every image
     in the list. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,
      MagickPathExtent);
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers: if any frame's scene number is
          not strictly increasing, renumber the whole list sequentially
          starting from the first frame's scene.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* Suppress the per-frame monitor while writing multiple frames; overall
       progress is reported through SetImageProgress below. */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* With adjoin, WriteImage consumed the whole sequence in one file. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  /* status accumulated per-frame results via &=; nonzero means all wrote. */
  return(status != 0 ? MagickTrue : MagickFalse);
}
origin.h
#pragma once #include <assert.h> #include <iostream> //#include <malloc.h> #include <memory> #include "omp.h" #include <stdlib.h> #include <time.h> #include <vector> using namespace std; void bcxy_kcrs_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; //#pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { #pragma omp parallel for for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int s = 0; s < Ns; s++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += In[b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s * (StrideX* Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + (StrideX*x + w) * (StrideY*Ny + Nh - 1) + (StrideY*y + h)] * Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; } } } } } } } } void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { for (int s = 0; s < Ns; s++) { /* Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += */ /* In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* s * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* (x + w) * (Ny + Nh - 1) + (y + h)] * */ /* Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; */ int kt1 = t /LKF; int kt2 = t %LKF; int ot1 = t /LOF; int ot2 = t %LOF; int s1 = s / LC; int s2 = s%LC; int Ooffset = b* Nt * Nx * Ny + ot1 * Nx * Ny*LOF + x*Ny*LOF + y*LOF + ot2; int Ioffset = b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s1 * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) * LC + (StrideX*x + w) * (StrideY*Ny + Nh - 1)*LC + (StrideY*y + h) * LC + s2; int Koffset = kt1 * Ns * Nw * Nh * LKF + s * Nw * Nh*LKF + w * 
Nh*LKF + h*LKF + kt2; Out[Ooffset] += In[Ioffset]* Ker[Koffset]; // if(Ooffset == 896){ // cout<<"Inoff="<<Ioffset<<", Koff="<<Koffset<<endl; // } } } } } } } } } int compare(float *C1, float *C2, int size) { cout << "comparing" << endl; for (int i = 0; i < size; i++) { if (C1[i] != C2[i]) { cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl; return -1; } } cout << "fin compare\n"; return 0; }
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-friendly ceiling/floor division helpers used by the generated
   (PLUTO/CLooG) tiled loop bounds below. */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y)    ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: may modify *y (carry normalization) as a side effect.
 */
int timeval_subtract(struct timeval *result, struct timeval *x,
                     struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: argv = {Nx, Ny, Nz, Nt}; each spatial extent gets a
 * +8 halo (order-2 stencil with radius-4 taps).
 * NOTE(review): if argc <= 3 (or <= 4) the corresponding N* (and Nt) are
 * read uninitialized below -- the program presumably assumes all four
 * arguments are always passed; confirm. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A holds two time planes (Jacobi-style ping-pong on A[t%2]).
   * NOTE(review): the first malloc assigned to roc2 is immediately
   * overwritten by the Nz-sized one below, leaking sizeof(double**) bytes. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;
  /* NOTE(review): tile_size is only consumed by the source-to-source tool;
     it is unused at run time and never freed. */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* NOTE(review): initialization starts at index 1, so the 0-planes of
     A[0]/roc2 (and all of A[1]) stay uninitialized even though the stencil
     below reads down to index 0 (tap offset -4) -- confirm intended. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;  /* NOTE(review): set but not used below */
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Radius-4 central-difference coefficients of the 25-point stencil. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */
/* (Review note: the glibc preamble comments above are artifacts of the
   polyhedral code generator having been run on preprocessed source.) */

  int t1, t2, t3, t4, t5, t6, t7, t8;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled wavefront: t1 sweeps time tiles, t2 is the parallel z-tile
   dimension, t5 is the actual time step, (t6,t7,t8) the z/y/x point
   coordinates shifted by 4*t5 (time skewing). */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=Nt-1;t1++) {
    lbp=ceild(t1+1,2);
    ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) {
        for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(4*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(4*t3+Nx-9,256));t4++) {
          for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),64*t4+62);t5++) {
            for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                lbv=max(256*t4,4*t5+4);
                ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* Leapfrog update: new = 2*cur - old + roc2 * stencil(cur),
                     where stencil is the 25-point (radius-4 per axis) sum. */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] =
                    (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) -
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) +
                     (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] *
                      (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) +
                          (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] +
                                        A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) +
                                       A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) +
                                      A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) +
                                     A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) +
                                    A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) +
                         (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] +
                                       A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) +
                                      A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) +
                                     A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) +
                                    A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) +
                                   A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) +
                        (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] +
                                      A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) +
                                     A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) +
                                    A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) +
                                   A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) +
                                  A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) +
                       (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] +
                                     A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) +
                                    A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) +
                                   A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) +
                                  A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) +
                                 A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
valid.yolo3.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_136_136_64_3_3.h"
#include "gen_ukr_A4B2gemm_1_128_136_136_64_3_3.h"

/*
 * testrun: one convolution layer (1 x 128 x 136 x 136 output, 64 input
 * channels, 3x3 kernel) driven by generated micro-kernels.
 *
 * Phase 1: each thread transposes its share of the original kernel oriB
 * into the packed layout B (8x8 AVX transposes, 16-wide filter blocks),
 * then all threads synchronize on the barrier before the compute phase.
 *
 * NOTE(review): uNf, uNc, uNw, uNh, Tc1, Txy3 and Tf2 are expected to come
 * from the build / included headers -- confirm.  This function is presumably
 * called from inside an omp parallel region (it uses omp_get_thread_num()
 * and a bare `#pragma omp barrier`).
 */
void testrun(float* A ,float*B, float*C, float*oriB ){
  int tid = omp_get_thread_num();
  /* Layer constants.  NOTE(review): Nx/Ny/Nh are set but the generated
     block below uses the literal values (136, 18496, ...) directly. */
  int Nx = 136;
  int Ny = 136;
  int Nh = 3;
  /* Row offsets handed to the scatter micro-kernel; adjusted temporarily
     when a 6-row strip straddles the end of an image row (see below). */
  long long Astrides[6] = {0,1,2,3,4,5};
  int b1 = 0;
  /* Phase 1: pack/transpose the kernel.  fpck walks 16-filter blocks,
     cwh walks 8-element chunks of the c*w*h dimension. */
  for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
    for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
      transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
      transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
    }
  }
#pragma omp barrier
  // begin push button generated block
  /* Phase 2: auto-generated tiling.  Loop naming: c* = input channels,
     xy* = flattened output pixels (x*136+y), f* = output filters; the
     innermost step is a 6-row x 2-vector (or 4x2 tail) micro-kernel. */
  for(int c5=0;c5<64+0;c5+=64) {
   for(int xy5=0;xy5<18496+0;xy5+=18496) {
    for(int f5=0;f5<128+0;f5+=128) {
     for(int xy4=xy5;xy4<min(18496, 18496+xy5);xy4+=18496) {
      for(int f4=f5;f4<min(128, 128+f5);f4+=128) {
       for(int c4=c5;c4<min(64, 64+c5);c4+=64) {
        for(int c3=c4;c3<min(64, 64+c4);c3+=Tc1) {
         for(int xy3=xy4;xy3<min(18496, 18496+xy4);xy3+=Txy3) {
          for(int f3=f4;f3<min(128, 128+f4);f3+=Tf2) {
           for(int xy2=xy3;xy2<min(18496, Txy3+xy3);xy2+=6) {
            for(int f2=f3;f2<min(128, Tf2+f3);f2+=16) {
             for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1) {
              for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1) {
               for(int xy1=xy2;xy1<min(18496, 6+xy2);xy1+=6) {
                for(int f1=f2;f1<min(128, 16+f2);f1+=16) {
                 int ctile=min(Tc1, 64-c1);
                 /* Decompose flat indices into packed-layout coordinates. */
                 int x1=xy1/136;
                 int y1=xy1%136/1;
                 int c1_1=c1/1;
                 int c1_2=c1%1/1;
                 int kf1_1=f1/16;
                 int kf1_2=f1%16/1;
                 int of1_1=f1/1;
                 int of1_2=f1%1/1;
                 int offsetA=0+b1*1218816+c1_1*19044+1*x1*138+1*y1*1+c1_2*1;
                 int offsetB=0+kf1_1*9216+c1*144+0*48+0*16+kf1_2*1;
                 int offsetC=0+b1*2367488+of1_1*18496+x1*136+y1*1+of1_2*1;
                 if(136-y1>=6){
                  /* Full 6-row strip inside one image row. */
                  cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                 }
                 else if(136*136-xy1>=6){
                  /* Strip wraps to the next image row: bump the wrapped
                     rows' strides by 2 (input row is 138 = 136+2 halo),
                     run the kernel, then restore the strides. */
                  for(int sti=136-y1;sti<6;sti+=1) {
                   Astrides[sti]+=2;
                  }
                  cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                  for(int sti=136-y1;sti<6;sti+=1) {
                   Astrides[sti]-=2;
                  }
                 }
                 else{
                  /* Final tail of the image: fewer than 6 rows remain. */
                  cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                 }
                }
               }
              }
             }
            }
           }
          }
         }
        }
       }
      }
     }
    }
   }
  }
  // end push button generated block
}
diffusion_grid.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef DIFFUSION_GRID_H_ #define DIFFUSION_GRID_H_ #include <assert.h> #include <Rtypes.h> #include <algorithm> #include <array> #include <cmath> #include <functional> #include <iostream> #include <string> #include <vector> #include "math_util.h" #include "param.h" namespace bdm { /// A class that computes the diffusion of extracellular substances /// It maintains the concentration and gradient of a single substance class DiffusionGrid { public: explicit DiffusionGrid(TRootIOCtor* p) {} DiffusionGrid(int substance_id, std::string substance_name, double dc, double mu, int resolution = 10) : substance_(substance_id), substance_name_(substance_name), dc_({{1 - dc, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6}}), mu_(mu), resolution_(resolution) {} virtual ~DiffusionGrid() {} /// @brief Initializes the grid by calculating the grid dimensions /// and number of boxes along the axis from the input arguments /// /// @param[in] grid_dimensions The grid dimensions /// @param[in] box_length The box length /// void Initialize(const std::array<int32_t, 6>& grid_dimensions) { // Get grid properties from neighbor grid grid_dimensions_ = grid_dimensions; assert(resolution_ > 0 && "The resolution cannot be zero!"); num_boxes_axis_[0] = resolution_; num_boxes_axis_[1] = resolution_; num_boxes_axis_[2] = resolution_; box_length_ = (grid_dimensions_[1] - grid_dimensions_[0]) / static_cast<double>(resolution_); box_volume_ = box_length_ * 
box_length_ * box_length_; assert(box_length_ > 0 && "Box length of diffusion grid must be greater than zero!"); // Set the parity of the number of boxes along the dimensions (since all // dimensions are the same, we just take the x-axis here) parity_ = num_boxes_axis_[0] % 2; total_num_boxes_ = num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2]; // Allocate memory for the concentration and gradient arrays c1_.resize(total_num_boxes_); c2_.resize(total_num_boxes_); gradients_.resize(3 * total_num_boxes_); initialized_ = true; } void RunInitializers() { assert(num_boxes_axis_[0] > 0 && "The number of boxes along an axis was found to be zero!"); if (initializers_.empty()) { return; } auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; // Apply all functions that initialize this diffusion grid for (size_t f = 0; f < initializers_.size(); f++) { for (size_t x = 0; x < nx; x++) { double real_x = grid_dimensions_[0] + x * box_length_; for (size_t y = 0; y < ny; y++) { double real_y = grid_dimensions_[2] + y * box_length_; for (size_t z = 0; z < nz; z++) { double real_z = grid_dimensions_[4] + z * box_length_; std::array<uint32_t, 3> box_coord; box_coord[0] = x; box_coord[1] = y; box_coord[2] = z; size_t idx = GetBoxIndex(box_coord); IncreaseConcentrationBy(idx, initializers_[f](real_x, real_y, real_z)); } } } } // Clear the initializer to free up space initializers_.clear(); initializers_.shrink_to_fit(); } /// @brief Updates the grid dimensions, based on the given threshold /// values. 
The diffusion grid dimensions need always be larger /// than the neighbor grid dimensions, so that each simulation /// object can obtain its local concentration / gradient /// /// @param[in] threshold_dimensions The threshold values /// void Update(const std::array<int32_t, 2>& threshold_dimensions) { // Update the grid dimensions such that each dimension ranges from // {treshold_dimensions[0] - treshold_dimensions[1]} auto min_gd = threshold_dimensions[0]; auto max_gd = threshold_dimensions[1]; grid_dimensions_ = {min_gd, max_gd, min_gd, max_gd, min_gd, max_gd}; // If the grid is not perfectly divisible along each dimension by the // box length, extend the grid so that it is int dimension_length = max_gd - min_gd; for (int i = 0; i < 3; i++) { int r = fmod(dimension_length, box_length_); if (r > 1e-9) { // std::abs for the case that box_length_ > dimension_length grid_dimensions_[2 * i + 1] += (box_length_ - r); } } // Calculate by how many boxes each dimension has grown int new_dimension_length = grid_dimensions_[1] - grid_dimensions_[0]; int new_num_boxes = std::ceil(new_dimension_length / box_length_); int growth = new_num_boxes - num_boxes_axis_[0]; if (growth > 0) { // Store the old number of boxes along each axis for comparison std::array<size_t, 3> tmp_num_boxes_axis = num_boxes_axis_; // Increase number of boxes along axis accordingly num_boxes_axis_[0] += growth; num_boxes_axis_[1] += growth; num_boxes_axis_[2] += growth; // We need to maintain the parity of the number of boxes along each // dimension, otherwise copying of the substances to the increases grid // will not be symmetrically done; resulting in shifting of boxes // We add a box in the negative direction, because the only way the parity // could have changed is because of adding a box in the positive direction // (due to the grid not being perfectly divisible; see above) if (num_boxes_axis_[0] % 2 != parity_) { for (int i = 0; i < 3; i++) { grid_dimensions_[2 * i] -= box_length_; 
num_boxes_axis_[i]++; } } // Temporarily save previous grid data std::vector<double> tmp_c1 = c1_; std::vector<double> tmp_gradients = gradients_; c1_.clear(); c2_.clear(); gradients_.clear(); total_num_boxes_ = num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2]; CopyOldData(tmp_c1, tmp_gradients, tmp_num_boxes_axis); assert(total_num_boxes_ >= tmp_num_boxes_axis[0] * tmp_num_boxes_axis[1] * tmp_num_boxes_axis[2] && "The diffusion grid tried to shrink! It can only become larger"); } } /// Copies the concentration and gradients values to the new /// (larger) grid. In the 2D case it looks like the following: /// /// [0 0 0 0] /// [v1 v2] --> [0 v1 v2 0] /// [v3 v4] --> [0 v3 v4 0] /// [0 0 0 0] /// /// The dimensions are doubled in this case from 2x2 to 4x4 /// If the dimensions would be increased from 2x2 to 3x3, it will still /// be increased to 4x4 in order for GetBoxIndex to function correctly /// void CopyOldData(const std::vector<double>& old_c1, const std::vector<double>& old_gradients, const std::array<size_t, 3>& old_num_boxes_axis) { // Allocate more memory for the grid data arrays c1_.resize(total_num_boxes_); c2_.resize(total_num_boxes_); gradients_.resize(3 * total_num_boxes_); auto incr_dim_x = num_boxes_axis_[0] - old_num_boxes_axis[0]; auto incr_dim_y = num_boxes_axis_[1] - old_num_boxes_axis[1]; auto incr_dim_z = num_boxes_axis_[2] - old_num_boxes_axis[2]; int off_x = incr_dim_x / 2; int off_y = incr_dim_y / 2; int off_z = incr_dim_z / 2; int num_box_xy = num_boxes_axis_[0] * num_boxes_axis_[1]; int old_box_xy = old_num_boxes_axis[0] * old_num_boxes_axis[1]; int new_origin = off_z * (num_boxes_axis_[0] * num_boxes_axis_[1]) + off_y * num_boxes_axis_[0] + off_x; for (size_t k = 0; k < old_num_boxes_axis[2]; k++) { int offset = new_origin + k * num_box_xy; for (size_t j = 0; j < old_num_boxes_axis[1]; j++) { if (j != 0) { offset += num_boxes_axis_[0]; } for (size_t i = 0; i < old_num_boxes_axis[0]; i++) { auto idx = k * old_box_xy + j * 
old_num_boxes_axis[0] + i; c1_[offset + i] = old_c1[idx]; gradients_[3 * (offset + i)] = old_gradients[3 * idx]; gradients_[3 * (offset + i) + 1] = old_gradients[3 * idx + 1]; gradients_[3 * (offset + i) + 2] = old_gradients[3 * idx + 2]; } } } } /// Solves a 5-point stencil diffusion equation, with leaking-edge /// boundary conditions. Substances are allowed to leave the simulation /// space. This prevents building up concentration at the edges /// void DiffuseWithLeakingEdge() { int nx = num_boxes_axis_[0]; int ny = num_boxes_axis_[1]; int nz = num_boxes_axis_[2]; #define YBF 16 #pragma omp parallel for collapse(2) for (int yy = 0; yy < ny; yy += YBF) { for (int z = 0; z < nz; z++) { // To let the edges bleed we set some diffusion coefficients // to zero. This prevents substance building up at the edges auto dc_2_ = dc_; int ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (int y = yy; y < ymax; y++) { dc_2_ = dc_; int x; int c, n, s, b, t; x = 0; c = x + y * nx + z * nx * ny; if (y == 0) { n = c; dc_2_[4] = 0; } else { n = c - nx; } if (y == (ny - 1)) { s = c; dc_2_[3] = 0; } else { s = c + nx; } if (z == 0) { b = c; dc_2_[5] = 0; } else { b = c - nx * ny; } if (z == (nz - 1)) { t = c; dc_2_[6] = 0; } else { t = c + nx * ny; } // x = 0; we leak out substances past this edge (so multiply by 0) c2_[c] = (dc_2_[0] * c1_[c] + 0 * c1_[c] + dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; // x = nx-1; we leak out substances past this edge (so multiply by 0) c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + 0 * c1_[c] + dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) * (1 - mu_); } // tile ny } // 
tile nz } // block ny c1_.swap(c2_); } /// Solves a 5-point stencil diffusion equation, with closed-edge /// boundary conditions. Substances are not allowed to leave the simulation /// space. Keep in mind that the concentration can build up at the edges /// void DiffuseWithClosedEdge() { auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x; int c, n, s, b, t; x = 0; c = x + y * nx + z * nx * ny; n = (y == 0) ? c : c - nx; s = (y == ny - 1) ? c : c + nx; b = (z == 0) ? c : c - nx * ny; t = (z == nz - 1) ? c : c + nx * ny; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c] + dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c] + dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] + dc_[6] * c1_[t]) * (1 - mu_); } // tile ny } // tile nz } // block ny c1_.swap(c2_); } void DiffuseEuler() { const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; // TODO(ahmad): this probably needs to scale with Param::simulation_timestep const double dt = 1; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, n, s, b, t; c = x + y * nx + z * nx * 
ny; #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; if (y == 0 || y == (ny - 1) || z == 0 || z == (nz - 1)) { continue; } n = c - nx; s = c + nx; b = c - nx * ny; t = c + nx * ny; c2_[c] = (c1_[c] + d * dt * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; } // tile ny } // tile nz } // block ny c1_.swap(c2_); } void DiffuseEulerLeakingEdge() { const auto nx = num_boxes_axis_[0]; const auto ny = num_boxes_axis_[1]; const auto nz = num_boxes_axis_[2]; const double ibl2 = 1 / (box_length_ * box_length_); const double d = 1 - dc_[0]; std::array<int, 4> l; // TODO(ahmad): this probably needs to scale with Param::simulation_timestep const double dt = 1; #define YBF 16 #pragma omp parallel for collapse(2) for (size_t yy = 0; yy < ny; yy += YBF) { for (size_t z = 0; z < nz; z++) { size_t ymax = yy + YBF; if (ymax >= ny) { ymax = ny; } for (size_t y = yy; y < ymax; y++) { size_t x = 0; int c, n, s, b, t; c = x + y * nx + z * nx * ny; n = (y == 0) ? c : c - nx; s = (y == ny - 1) ? c : c + nx; b = (z == 0) ? c : c - nx * ny; t = (z == nz - 1) ? 
c : c + nx * ny; c2_[c] = (c1_[c] + d * dt * (0 - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); #pragma omp simd for (x = 1; x < nx - 1; x++) { ++c; ++n; ++s; ++b; ++t; l.fill(1); if (y == 0) { l[0] = 0; } if (y == ny - 1) { l[1] = 0; } if (z == 0) { l[2] = 0; } if (z == nz - 1) { l[3] = 0; } c2_[c] = (c1_[c] + d * dt * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 + d * dt * (l[0] * c1_[s] - 2 * c1_[c] + l[1] * c1_[n]) * ibl2 + d * dt * (l[2] * c1_[b] - 2 * c1_[c] + l[3] * c1_[t]) * ibl2) * (1 - mu_); } ++c; ++n; ++s; ++b; ++t; c2_[c] = (c1_[c] + d * dt * (c1_[c - 1] - 2 * c1_[c] + 0) * ibl2 + d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 + d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) * (1 - mu_); } // tile ny } // tile nz } // block ny c1_.swap(c2_); } /// Calculates the gradient for each box in the diffusion grid. /// The gradient is calculated in each direction (x, y, z) as following: /// /// c(x + box_length_) - c(x - box_length) / (2 * box_length_), /// /// where c(x) implies the concentration at position x /// /// At the edges the gradient is the same as the box next to it void CalculateGradient() { double gd = 1 / (box_length_ * 2); auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; #pragma omp parallel for collapse(2) for (size_t z = 0; z < nz; z++) { for (size_t y = 0; y < ny; y++) { for (size_t x = 0; x < nx; x++) { int c, e, w, n, s, b, t; c = x + y * nx + z * nx * ny; if (x == 0) { e = c; w = c + 2; } else if (x == nx - 1) { e = c - 2; w = c; } else { e = c - 1; w = c + 1; } if (y == 0) { n = c + 2 * nx; s = c; } else if (y == ny - 1) { n = c; s = c - 2 * nx; } else { n = c + nx; s = c - nx; } if (z == 0) { t = c + 2 * nx * ny; b = c; } else if (z == nz - 1) { t = c; b = c - 2 * nx * ny; } else { t = c + nx * ny; b = c - nx * ny; } // Let the gradient point from low to high concentration gradients_[3 * c + 0] = 
(c1_[w] - c1_[e]) * gd; gradients_[3 * c + 1] = (c1_[n] - c1_[s]) * gd; gradients_[3 * c + 2] = (c1_[t] - c1_[b]) * gd; } } } } /// Increase the concentration at specified position with specified amount void IncreaseConcentrationBy(const std::array<double, 3>& position, double amount) { auto idx = GetBoxIndex(position); IncreaseConcentrationBy(idx, amount); } /// Increase the concentration at specified box with specified amount void IncreaseConcentrationBy(size_t idx, double amount) { assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); c1_[idx] += amount; if (c1_[idx] > concentration_threshold_) { c1_[idx] = concentration_threshold_; } } /// Get the concentration at specified position double GetConcentration(const std::array<double, 3>& position) { return c1_[GetBoxIndex(position)]; } /// Get the (normalized) gradient at specified position void GetGradient(const std::array<double, 3>& position, std::array<double, 3>* gradient) { auto idx = GetBoxIndex(position); assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); (*gradient)[0] = gradients_[3 * idx]; (*gradient)[1] = gradients_[3 * idx + 1]; (*gradient)[2] = gradients_[3 * idx + 2]; auto norm = std::sqrt((*gradient)[0] * (*gradient)[0] + (*gradient)[1] * (*gradient)[1] + (*gradient)[2] * (*gradient)[2]); if (norm > 1e-10) { (*gradient)[0] /= norm; (*gradient)[1] /= norm; (*gradient)[2] /= norm; } } std::array<uint32_t, 3> GetBoxCoordinates( const std::array<double, 3>& position) const { std::array<uint32_t, 3> box_coord; box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_; box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_; box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_; return box_coord; } size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const { size_t ret = box_coord[2] * num_boxes_axis_[0] * num_boxes_axis_[1] + box_coord[1] * num_boxes_axis_[0] + box_coord[0]; return ret; } 
/// Calculates the box index of the substance at specified position size_t GetBoxIndex(const std::array<double, 3>& position) const { auto box_coord = GetBoxCoordinates(position); return GetBoxIndex(box_coord); } void SetDecayConstant(double mu) { mu_ = mu; } void SetConcentrationThreshold(double t) { concentration_threshold_ = t; } double GetConcentrationThreshold() { return concentration_threshold_; } double* GetAllConcentrations() { return c1_.data(); } double* GetAllGradients() { return gradients_.data(); } const std::array<size_t, 3>& GetNumBoxesArray() { return num_boxes_axis_; } size_t GetNumBoxes() { return total_num_boxes_; } double GetBoxLength() { return box_length_; } int GetSubstanceId() { return substance_; } std::string GetSubstanceName() { return substance_name_; } double GetDecayConstant() { return mu_; } int32_t* GetDimensionsPtr() { return grid_dimensions_.data(); } std::array<int32_t, 6>& GetDimensions() { return grid_dimensions_; } std::array<double, 7>& GetDiffusionCoefficients() { return dc_; } bool IsInitialized() { return initialized_; } int GetResolution() { return resolution_; } double GetBoxVolume() { return box_volume_; } template <typename F> void AddInitializer(F function) { initializers_.push_back(function); } private: /// The id of the substance of this grid int substance_ = 0; /// The name of the substance of this grid std::string substance_name_ = ""; /// The side length of each box double box_length_ = 0; /// the volume of each box double box_volume_ = 0; /// The array of concentration values std::vector<double> c1_ = {}; /// An extra concentration data buffer for faster value updating std::vector<double> c2_ = {}; /// The array of gradients (x, y, z) std::vector<double> gradients_ = {}; /// The maximum concentration value that a box can have double concentration_threshold_ = 1e15; /// The diffusion coefficients [cc, cw, ce, cs, cn, cb, ct] std::array<double, 7> dc_ = {{0}}; /// The decay constant double mu_ = 0; /// The grid 
dimensions of the diffusion grid std::array<int32_t, 6> grid_dimensions_ = {{0}}; /// The number of boxes at each axis [x, y, z] std::array<size_t, 3> num_boxes_axis_ = {{0}}; /// The total number of boxes in the diffusion grid size_t total_num_boxes_ = 0; /// Flag to determine if this grid has been initialized bool initialized_ = false; /// The resolution of the diffusion grid int resolution_ = 0; /// If false, grid dimensions are even; if true, they are odd bool parity_ = false; /// A list of functions that initialize this diffusion grid std::vector<std::function<double(double, double, double)>> initializers_ = {}; ClassDefNV(DiffusionGrid, 1); }; } // namespace bdm #endif // DIFFUSION_GRID_H_
pi_omp_teams.c
/*
 * Tecnologico de Costa Rica (www.tec.ac.cr)
 * Course: MP-6171 High Performance Embedded Systems
 * Developers Name: Verny Morales and Luis Carlos Alvarez
 * Developers email: verny.morales@gmail.com and lcam03@gmail.com
 * General purpose: approximate pi by midpoint-rule integration of
 *                  4/(1+x^2) over [0,1], offloaded with OpenMP target/teams.
 * Input:  none (problem size is the file-scope constant num_steps)
 * Output: the computed approximation of pi and the elapsed wall time
 */
// Build: gcc -fopenmp pi_omp_teams.c -o pi_omp_teams
// Run:   ./pi_omp_teams

#include <omp.h>
#include <stdio.h>

static long num_steps = 1000000000;  /* number of rectangles in the midpoint rule */
double step;                         /* width of each rectangle: 1/num_steps */

/* Hosted C requires main to return int; the original `void main` is
 * non-standard (C11 5.1.2.2.1). */
int main(void)
{
    /* long, matching num_steps, so the comparison i < num_steps never mixes
     * ranks and the counter cannot overflow if num_steps is enlarged. */
    long i;
    double x, pi, sum = 0.0;
    double start_time, run_time;
    int num_teams = 3;

    step = 1.0 / (double) num_steps;
    start_time = omp_get_wtime();

    // target                        -> map variables to a device data environment and
    //                                  execute the construct on that device
    // teams distribute parallel for -> shortcut for a teams construct containing a
    //                                  distributed parallel worksharing loop
    // num_teams                     -> sets the number of teams in the teams region
    // reduction(+:sum)              -> each thread accumulates a private partial sum;
    //                                  partials are combined with + at region end
    // private(x)                    -> x is per-iteration scratch and must not be shared
    // map(tofrom:sum)               -> copy sum to the device and back so the reduction
    //                                  result is visible on the host
    #pragma omp target teams distribute parallel for num_teams(num_teams) reduction(+:sum) private(x) map(tofrom:sum)
    for (i = 0; i < num_steps; i++) {
        x = (i + 0.5) * step;              /* midpoint of rectangle i */
        sum = sum + 4.0 / (1.0 + x * x);   /* integrand 4/(1+x^2); integral is pi */
    }
    pi = step * sum;

    run_time = omp_get_wtime() - start_time;
    printf("pi with %ld steps is %lf in %lf seconds\n", num_steps, pi, run_time);
    return 0;
}
separation_objective.h
#pragma once #ifndef OPTIMIZATION_LIB_SEPARATION_OBJECTIVE_H #define OPTIMIZATION_LIB_SEPARATION_OBJECTIVE_H // STL includes #include <vector> // Optimization lib includes #include "../data_providers/plain_data_provider.h" #include "./dense_objective_function.h" template<Eigen::StorageOptions StorageOrder_> class Separation : public DenseObjectiveFunction<StorageOrder_> { public: /** * Public type definitions */ enum class Properties : int32_t { Delta = DenseObjectiveFunction<StorageOrder_>::Properties::Count_, ValuePerEdge }; /** * Constructors and destructor */ Separation(const std::shared_ptr<MeshDataProvider>& mesh_data_provider, const std::shared_ptr<EmptyDataProvider>& empty_data_provider) : DenseObjectiveFunction(mesh_data_provider, empty_data_provider, "Separation", 0, false) { this->Initialize(); } virtual ~Separation() { } /** * Setters */ void SetDelta(const double delta) { delta_ = delta; } bool SetProperty(const int32_t property_id, const std::any property_context, const std::any property_value) override { if(DenseObjectiveFunction<StorageOrder_>::SetProperty(property_id, property_context, property_value)) { return true; } const Properties properties = static_cast<Properties>(property_id); switch (properties) { case Properties::Delta: SetDelta(std::any_cast<const double>(property_value)); return true; } return false; } /** * Getters */ double GetDelta() const { return delta_; } bool GetProperty(const int32_t property_id, const int32_t property_modifier_id, const std::any property_context, std::any& property_value) override { if (DenseObjectiveFunction<StorageOrder_>::GetProperty(property_id, property_modifier_id, property_context, property_value)) { return true; } const Properties properties = static_cast<Properties>(property_id); switch (properties) { case Properties::Delta: property_value = GetDelta(); return true; } return false; } private: /** * Overrides */ void CalculateValue(double& f) override { EsepP = Esep * X; 
EsepP_squared.resize(EsepP.rows(), 2); int rows = EsepP.rows(); #pragma omp parallel for for(int i = 0; i < rows; i++) { EsepP_squared.coeffRef(i, 0) = EsepP.coeffRef(i, 0) * EsepP.coeffRef(i, 0); EsepP_squared.coeffRef(i, 1) = EsepP.coeffRef(i, 1) * EsepP.coeffRef(i, 1); } EsepP_squared_rowwise_sum = EsepP_squared.rowwise().sum(); EsepP_squared_rowwise_sum_plus_delta = EsepP_squared_rowwise_sum.array() + delta_; f_per_pair = EsepP_squared_rowwise_sum.cwiseQuotient(EsepP_squared_rowwise_sum_plus_delta); // add edge length factor f_per_pair = f_per_pair.cwiseProduct(edge_lenghts_per_pair); // sum everything up f = f_per_pair.sum(); } void CalculateValuePerVertex(Eigen::VectorXd& f_per_vertex) override { f_per_vertex.setZero(); int64_t vertex1_index; int64_t vertex2_index; #pragma omp parallel for for (int i = 0; i < Esept.outerSize(); ++i) { // no inner loop because there are only 2 nnz values per col Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); int64_t vertex1_index = it.row(); int64_t vertex2_index = (++it).row(); f_per_vertex.coeffRef(vertex1_index) += EsepP_squared_rowwise_sum[i]; f_per_vertex.coeffRef(vertex2_index) += EsepP_squared_rowwise_sum[i]; } } void CalculateValuePerEdge(Eigen::VectorXd& domain_value_per_edge, Eigen::VectorXd& image_value_per_edge) override { } void CalculateGradient(Eigen::VectorXd& g) override { Eigen::MatrixX2d ge; Eigen::VectorXd d_vec = Eigen::VectorXd::Constant(EsepP_squared_rowwise_sum.rows(), delta_); Eigen::VectorXd x_plus_d = EsepP_squared_rowwise_sum + d_vec; Eigen::VectorXd d = d_vec.cwiseQuotient(x_plus_d.cwiseAbs2()); ge = 2.0 * Esept * d.cwiseProduct(edge_lenghts_per_pair).asDiagonal() * EsepP; g = Eigen::Map<Eigen::VectorXd>(ge.data(), 2.0 * ge.rows(), 1); } void PreUpdate(const Eigen::VectorXd& x) override { X = Eigen::Map<const Eigen::MatrixX2d>(x.data(), x.rows() >> 1, 2); } void PreInitialize() override { Esep4 << 1, 0, -1, 0, 0, 1, 0, -1, -1, 0, 1, 0, 0, -1, 0, 1; Esep = 
this->mesh_data_provider_->GetCorrespondingVertexPairsCoefficients(); Esept = Esep.transpose(); edge_lenghts_per_pair = this->mesh_data_provider_->GetCorrespondingVertexPairsEdgeLength(); } void InitializeTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { const int64_t outer_size = Esept.outerSize(); triplets.reserve(10 * outer_size); auto image_vertices_count = this->mesh_data_provider_->GetImageVerticesCount(); for (int i = 0; i < outer_size; ++i) { Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); int idx_xi = it.row(); int idx_xj = (++it).row(); // The indices in the small hessians are setup like this: // xi, xi+n, xj, xj+n from top to bottom and left to right // we traverse only the upper diagonal of each 4x4 hessian // and thus store 10 values, gathered in column order. // First column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xi, 0)); // Second column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xi + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xi + image_vertices_count, idx_xi + image_vertices_count, 0)); // Third column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xj, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xi + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xj, 0)); // Fourth column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xi + image_vertices_count, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj + image_vertices_count, idx_xj + image_vertices_count, 0)); } } void CalculateRawTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { // no inner loop because there are only 2 nnz values per col #pragma omp parallel for for (int i = 0; i < Esept.outerSize(); ++i) { Eigen::Vector2d xi, xj; Eigen::Matrix4d sh; 
int idx_xi; int idx_xj; int factor; Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); idx_xi = it.row(); factor = it.value(); idx_xj = (++it).row(); xi = X.row(idx_xi); xj = X.row(idx_xj); FindSingleHessian(xi, xj, sh); sh *= factor; sh *= edge_lenghts_per_pair(i); int ind = 10 * i; for (int a = 0; a < 4; ++a) { for (int b = 0; b <= a; ++b) { const_cast<double&>(triplets[ind++].value()) = sh(b, a); } } } } /** * Private methods */ void FindSingleHessian(const Eigen::Vector2d& xi, const Eigen::Vector2d& xj, Eigen::Matrix4d& H) { bool speedup = true; Eigen::Vector2d dx = xi - xj; Eigen::Vector4d dxx; dxx << dx, -dx; double t = 0.5 * dx.squaredNorm(); double fp, fpp; fp = delta_ / ((t + delta_) * (t + delta_)); Eigen::Matrix4d Esep4; Esep4 << 1, 0, -1, 0, 0, 1, 0, -1, -1, 0, 1, 0, 0, -1, 0, 1; H = fp * Esep4; } /** * Fields */ double delta_ = 1.0; Eigen::MatrixX2d X; Eigen::SparseMatrix<double> Esep; Eigen::SparseMatrix<double> Esept; Eigen::MatrixX2d EsepP; Eigen::Matrix4d Esep4; Eigen::MatrixX2d EsepP_squared; Eigen::VectorXd f_per_pair; Eigen::VectorXd edge_lenghts_per_pair; Eigen::VectorXd EsepP_squared_rowwise_sum; Eigen::VectorXd EsepP_squared_rowwise_sum_plus_delta; }; #endif
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-508,512)),ceild(32*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(8*t1+Nx+13,512)),floord(16*t2+Nx+12,512)),floord(32*t3+Nx+28,512)),floord(16*t1-16*t2+Nz+Nx+11,512));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),512*t4+510),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
hello.c
/*
	[2018] Isaias Faria Silva
	All Rights Reserved.

	NOTICE: All information contained here in this file is the
	property of Isaias Faria Silva. If you want to use this,
	please put the contact rights information.

	PURPOSE:     Practical activity of Parallel Programming
	             Clermont-Auvergne University - Computer Science Institute
	             ZZ3 - 2018 January

	DESCRIPTION: This is just a "hello world!" using OpenMP

	INSTRUCTIONS:
	   How to compile: gcc -fopenmp hello.c -o hello
	   How to execute: ./hello NBOFTHREADS
	     ps: NBOFTHREADS is optional. The default value is 8 threads.

	Last update: 15 january 2018
	This project is also available at Github >> https://goo.gl/23qq51
*/
#include <stdio.h>
#include <stdlib.h>   /* atoi: the original omitted this, making the call an
                         implicit declaration (invalid since C99) */
#include "omp.h"

int main(int argc, char **argv)
{
	int NBofTHREADS = 8;     /* default team size, overridable via argv[1] */
	if (argc > 1) {
		NBofTHREADS = atoi(argv[1]);
	}

	/* Outside a parallel region omp_get_num_threads() reports 1
	 * (only the initial thread is executing). */
	printf("Before PARALLEL REGION : There are %d threads\n\n", omp_get_num_threads());

	/* num_threads clause requests the team size for this region only. */
	#pragma omp parallel num_threads(NBofTHREADS)
	{
		printf("In the PARALLEL REGION : There are %d threads\n\n", omp_get_num_threads());
		printf("Hello World from TID %d!\n", omp_get_thread_num());
	}

	printf("After PARALLEL REGION : There are %d threads\n\n", omp_get_num_threads());
	return 0;
}
axpy_ompacc3.c
// Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Testing extensions for multiple devices
// Liao 2/2/2015

// AXPY multiple GPU version, using OpenMP 4.0 standard directives
// vector = vector + vector * scalar
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <omp.h>

#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
//#define MAX_GPU_COUNT 4 // maximum GPU numbers in computation

/* zero out the entire vector */
void zero(REAL *A, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        A[i] = 0.0;
    }
}

/* initialize a vector with random floating point numbers */
void init(REAL *A, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        A[i] = (REAL)drand48();
    }
}

/* relative L1 difference between A and B: sum|A-B| / sum|B| */
REAL check(REAL *A, REAL *B, int n)
{
    int i;
    REAL diffsum = 0.0, sum = 0.0;
    for (i = 0; i < n; i++) {
        diffsum += fabs(A[i] - B[i]);
        sum += fabs(B[i]);
    }
    return diffsum / sum;
}

/* CPU reference version: y += a * x */
void axpy(REAL *x, REAL *y, long n, REAL a)
{
    int i;
    #pragma omp parallel for shared(x, y, n, a) private(i)
    for (i = 0; i < n; ++i) {
        y[i] += a * x[i];
    }
}

int main(int argc, char *argv[])
{
    int n, i;
    REAL *y_ref, *y_ompacc, *x;
    REAL a = 123.456f;

    n = VEC_LEN;
    if (argc >= 2)
        n = atoi(argv[1]);

    y_ref = (REAL *) malloc(n * sizeof(REAL));
    y_ompacc = (REAL *) malloc(n * sizeof(REAL));
    x = (REAL *) malloc(n * sizeof(REAL));

    srand48(1 << 12);
    init(x, n);
    init(y_ref, n);
    /* both versions start from the same y */
    memcpy(y_ompacc, y_ref, n * sizeof(REAL));

    // int GPU_N = 0;
    // Transformation point: obtain the number of devices to be used by default
    int GPU_N = xomp_get_num_devices();
#if 0
    cudaGetDeviceCount(&GPU_N);
    if (GPU_N > MAX_GPU_COUNT) {
        GPU_N = MAX_GPU_COUNT;
    }
#endif
    printf("CUDA-capable device count: %i\n", GPU_N);

    // preparation for multiple GPUs
    // Transformation point: set first level thread count to be GPU count used
    omp_set_num_threads(GPU_N);

    /* one host thread per device; each thread offloads its slice */
    #pragma omp parallel shared (GPU_N,x , y_ompacc, n) private(i)
    {
        int tid = omp_get_thread_num();
        // cudaSetDevice(tid);
        xomp_set_default_device (tid);
        long size, offset;
#if 0
        int size = n / GPU_N;
        int offset = size * tid;
        if(tid < n%GPU_N) { size++; }
        if(tid >= n%GPU_N) offset += n%GPU_N;
        else offset += tid;
#endif
        /* even partition of [0, n) across GPU_N devices */
        XOMP_static_even_divide (0, n, GPU_N, tid, &offset, &size);

        /* BUG FIX: size and offset are long, so they must be printed with
         * %ld — the original used %d, which is undefined behavior. */
        printf("thread %d working on GPU devices %d with size %ld copying data from y_ompacc with offset %ld\n",
               tid, tid, size, offset);

        int j;
        #pragma omp target device (tid) map(tofrom: y_ompacc[offset:size]) map(to: x[offset:size],a,size, offset)
        #pragma omp parallel for shared(size, a) private(j)
        for (j = offset; j < offset + size; ++j) {
            y_ompacc[j] += a * x[j];
        }
    }

    // serial version (reference result)
    axpy(x, y_ref, n, a);

    REAL checksum = check(y_ref, y_ompacc, n);
    printf("axpy(%d): checksum: %g\n", n, checksum);
    assert (checksum < 1.0e-10);

    free(y_ref);
    free(y_ompacc);
    free(x);
    return 0;
}
zunmlq.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_unmlq
 *
 * Overwrites the general complex m-by-n matrix C with
 *
 *                              side = PlasmaLeft      side = PlasmaRight
 *    trans = PlasmaNoTrans         Q * C                  C * Q
 *    trans = Plasma_ConjTrans    Q^H * C                  C * Q^H
 *
 * where Q is an orthogonal (or unitary) matrix defined as the product of k
 * elementary reflectors
 *
 *    Q = H(1) H(2) . . . H(k)
 *
 * as returned by plasma_zgelqf. Q is of order m if side = PlasmaLeft
 * and of order n if side = PlasmaRight.
 *
 *******************************************************************************
 *
 * @param[in] side
 *     Intended usage:
 *     - PlasmaLeft:  apply Q or Q^H from the left;
 *     - PlasmaRight: apply Q or Q^H from the right.
 *
 * @param[in] trans
 *     Intended usage:
 *     - PlasmaNoTrans:    apply Q;
 *     - Plasma_ConjTrans: apply Q^H.
 *
 * @param[in] m
 *     The number of rows of the matrix C. m >= 0.
 *
 * @param[in] n
 *     The number of columns of the matrix C. n >= 0.
 *
 * @param[in] k
 *     The number of rows of elementary tile reflectors whose product
 *     defines the matrix Q.
 *     If side == PlasmaLeft,  m >= k >= 0.
 *     If side == PlasmaRight, n >= k >= 0.
 *
 * @param[in] pA
 *     Details of the LQ factorization of the original matrix A as returned
 *     by plasma_zgelqf.
 *
 * @param[in] lda
 *     The leading dimension of the array A. lda >= max(1,k).
 *
 * @param[in] T
 *     Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[in,out] pC
 *     On entry, pointer to the m-by-n matrix C.
 *     On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
 *
 * @param[in] ldc
 *     The leading dimension of the array C. ldc >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zunmlq
 * @sa plasma_cunmlq
 * @sa plasma_dormlq
 * @sa plasma_sormlq
 * @sa plasma_zgelqf
 *
 ******************************************************************************/
int plasma_zunmlq(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex64_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // an = order of Q: m when applying from the left, n from the right.
    int an;
    if (side == PlasmaLeft) {
        an = m;
    }
    else {
        an = n;
    }
    if ((k < 0) || (k > an)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        k, an, 0, 0, k, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: release the tile descriptors; the original leaked A and C here.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // FIX: release workspace and descriptors; the original leaked all
        // three on this path.
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pC, ldc, C, sequence, &request);

        // Call the tile async function.
        plasma_omp_zunmlq(side, trans, A, T, C, work,
                          sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(C, pC, ldc, sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_unmlq
 *
 * Non-blocking tile version of plasma_zunmlq().
 * May return before the computation is finished.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] side
 *     Intended usage:
 *     - PlasmaLeft:  apply Q or Q^H from the left;
 *     - PlasmaRight: apply Q or Q^H from the right.
 *
 * @param[in] trans
 *     Intended usage:
 *     - PlasmaNoTrans:    apply Q;
 *     - Plasma_ConjTrans: apply Q^H.
 *
 * @param[in] A
 *     Descriptor of matrix A stored in the tile layout.
 *     Details of the LQ factorization of the original matrix A as returned
 *     by plasma_zgelqf.  (The original comment said "QR"/"plasma_zgeqrf",
 *     which was a copy-paste error from the unmqr variant.)
 *
 * @param[in] T
 *     Descriptor of matrix T.
 *     Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[in,out] C
 *     Descriptor of matrix C.
 *     On entry, the m-by-n matrix C.
 *     On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
 *
 * @param[in] work
 *     Workspace for the auxiliary arrays needed by some coreblas kernels.
 *     For multiplication by Q contains preallocated space for work
 *     arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *     Identifies the sequence of function calls that this call belongs to
 *     (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *     Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *     Errors are returned by setting sequence->status and
 *     request->status to error values. The sequence->status and
 *     request->status should never be set to PlasmaSuccess (the
 *     initial values) since another async call may be setting a
 *     failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zunmlq
 * @sa plasma_omp_cunmlq
 * @sa plasma_omp_dormlq
 * @sa plasma_omp_sormlq
 * @sa plasma_omp_zgelqf
 *
 ******************************************************************************/
void plasma_omp_zunmlq(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzunmlq_tree(side, trans, A, T, C, work, sequence, request);
    }
    else {
        plasma_pzunmlq(side, trans, A, T, C, work, sequence, request);
    }
}
ztrsm.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_trsm
 *
 * Solves one of the matrix equations
 *
 *    \f[ op( A ) \times X = \alpha B, \f] or
 *    \f[ X \times op( A ) = \alpha B, \f]
 *
 * where op( A ) is one of:
 *    \f[ op( A ) = A,   \f]
 *    \f[ op( A ) = A^T, \f]
 *    \f[ op( A ) = A^H, \f]
 *
 * alpha is a scalar, X and B are m-by-n matrices, and
 * A is a unit or non-unit, upper or lower triangular matrix.
 * The matrix X overwrites B.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          - PlasmaLeft:  op(A)*X = B,
 *          - PlasmaRight: X*op(A) = B.
 *
 * @param[in] uplo
 *          - PlasmaUpper: A is upper triangular,
 *          - PlasmaLower: A is lower triangular.
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] diag
 *          - PlasmaNonUnit: A has non-unit diagonal,
 *          - PlasmaUnit:    A has unit diagonal.
 *
 * @param[in] m
 *          The number of rows of the matrix B. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix B. n >= 0.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] pA
 *          The k-by-k triangular matrix,
 *          where k = m if side = PlasmaLeft,
 *            and k = n if side = PlasmaRight.
 *          If uplo = PlasmaUpper, the leading k-by-k upper triangular part
 *          of the array A contains the upper triangular matrix, and the
 *          strictly lower triangular part of A is not referenced.
 *          If uplo = PlasmaLower, the leading k-by-k lower triangular part
 *          of the array A contains the lower triangular matrix, and the
 *          strictly upper triangular part of A is not referenced.
 *          If diag = PlasmaUnit, the diagonal elements of A are also not
 *          referenced and are assumed to be 1.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,k).
 *
 * @param[in,out] pB
 *          On entry, the m-by-n right hand side matrix B.
 *          On exit, if return value = 0, the m-by-n solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_ztrsm
 * @sa plasma_ctrsm
 * @sa plasma_dtrsm
 * @sa plasma_strsm
 *
 ******************************************************************************/
int plasma_ztrsm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                                           plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if ((transa != PlasmaConjTrans) &&
        (transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans)) {
        plasma_error("illegal value of transa");
        return -3;
    }
    if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }
    // an is the order of the triangular matrix A.
    int an;
    if (side == PlasmaLeft)
        an = m;
    else
        an = n;
    if (lda < imax(1, an)) {
        plasma_error("illegal value of lda");
        // lda is the 9th argument (side, uplo, transa, diag, m, n, alpha,
        // pA, lda, ...); the previous code returned -8.
        return -9;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        // ldb is the 11th argument; the previous code returned -10.
        return -11;
    }

    // quick return
    if ((m == 0) || (n == 0))
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        an, an, 0, 0, an, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Release the tile matrices created above; previously leaked here.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call the tile async function.
        plasma_omp_ztrsm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_trsm
 *
 * Computes triangular solve.
 * Non-blocking tile version of plasma_ztrsm().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          - PlasmaLeft:  op(A)*X = B,
 *          - PlasmaRight: X*op(A) = B.
 *
 * @param[in] uplo
 *          - PlasmaUpper: A is upper triangular,
 *          - PlasmaLower: A is lower triangular.
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] diag
 *          - PlasmaNonUnit: A has non-unit diagonal,
 *          - PlasmaUnit:    A has unit diagonal.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] B
 *          Descriptor of matrix B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
* ******************************************************************************* * * @sa plasma_ztrsm * @sa plasma_omp_ctrsm * @sa plasma_omp_dtrsm * @sa plasma_omp_strsm * ******************************************************************************/ void plasma_omp_ztrsm(plasma_enum_t side, plasma_enum_t uplo, plasma_enum_t transa, plasma_enum_t diag, plasma_complex64_t alpha, plasma_desc_t A, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments if ((side != PlasmaLeft) && (side != PlasmaRight)) { plasma_error("illegal value of side"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((transa != PlasmaConjTrans) && (transa != PlasmaNoTrans) && (transa != PlasmaTrans)) { plasma_error("illegal value of transa"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) { plasma_error("illegal value of diag"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if ((B.m 
== 0) || (B.n == 0)) return; // Call the parallel function. plasma_pztrsm(side, uplo, transa, diag, alpha, A, B, sequence, request); }
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final {
  // Type-erased address of the user-supplied callable (stored as intptr_t).
  // NOTE(review): the constructor captures &CodeGen, so a RegionCodeGenTy
  // must not outlive the callable it was constructed from.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  // Trampoline that casts CodeGen back to its concrete type and invokes it.
  CodeGenTy Callback;
  // Optional pre/post action; mutable so setAction() works on const objects.
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  // The enable_if guard prevents this templated constructor from hijacking
  // copy construction from another RegionCodeGenTy.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};

/// Per-directive data collected from the clauses of an OpenMP task-like
/// construct; each list below is named after the clause it came from.
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  /// One depend clause: its kind, optional iterator, and dependence exprs.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Each PointerIntPair packs a clause value with one boolean flag
  // (flag semantics are defined at the use sites).
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
};

/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;

  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;

  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;

  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;

  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);

  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);

  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. 
struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// Constructor allowing to redefine the name separator for the variables. explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. 
/// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. 
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Default const ident_t object used for initialization of all other /// ident_t objects. llvm::Constant *DefaultOpenMPPSource = nullptr; using FlagsTy = std::pair<unsigned, unsigned>; /// Map of flags and corresponding default locations. using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>; OpenMPDefaultLocMapTy OpenMPDefaultLocMap; Address getOrCreateDefaultLocation(unsigned Flags); QualType IdentQTy; llvm::StructType *IdentTy = nullptr; /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. 
typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. 
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. 
OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. 
llvm::Constant *ID = nullptr;

public:
  OffloadEntryInfoTargetRegion()
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
  explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr,
                                        llvm::Constant *ID,
                                        OMPTargetRegionEntryKind Flags)
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
        ID(ID) {
    setAddress(Addr);
  }

  llvm::Constant *getID() const { return ID; }
  void setID(llvm::Constant *V) {
    assert(!ID && "ID has been set before!");
    ID = V;
  }
  static bool classof(const OffloadEntryInfo *Info) {
    return Info->getKind() == OffloadingEntryInfoTargetRegion;
  }
};

/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                   StringRef ParentName, unsigned LineNum,
                                   llvm::Constant *Addr, llvm::Constant *ID,
                                   OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                              StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                const OffloadEntryInfoTargetRegion &)>
    OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
    const OffloadTargetRegionEntryInfoActTy &Action);

//
// Device global variable entries related.
//

/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
  /// Mark the entry as a 'declare target to' variable.
  OMPTargetGlobalVarEntryTo = 0x0,
  /// Mark the entry as a 'declare target link' variable.
  OMPTargetGlobalVarEntryLink = 0x1,
};

/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
  /// Size of the global variable.
CharUnits VarSize;
  /// Linkage of the global variable.
  llvm::GlobalValue::LinkageTypes Linkage;

public:
  OffloadEntryInfoDeviceGlobalVar()
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
  explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                           OMPTargetGlobalVarEntryKind Flags)
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
  explicit OffloadEntryInfoDeviceGlobalVar(
      unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
      OMPTargetGlobalVarEntryKind Flags,
      llvm::GlobalValue::LinkageTypes Linkage)
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
        VarSize(VarSize), Linkage(Linkage) {
    setAddress(Addr);
  }

  CharUnits getVarSize() const { return VarSize; }
  void setVarSize(CharUnits Size) { VarSize = Size; }
  llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
  void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
  static bool classof(const OffloadEntryInfo *Info) {
    return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
  }
};

/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                        OMPTargetGlobalVarEntryKind Flags,
                                        unsigned Order);

/// Register device global variable entry.
void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                      CharUnits VarSize,
                                      OMPTargetGlobalVarEntryKind Flags,
                                      llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
  return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}

/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
                                const OffloadEntryInfoDeviceGlobalVar &)>
    OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
    const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
// Storage for target region entries kind. The storage is to be indexed by
// device ID, file ID, parent function name and line number (outermost key
// first, matching the nesting of the typedefs below).
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
    OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
    OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
    OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
    OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
    OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};

OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;

using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false; /// Atomic ordering from the omp requires directive. llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic; /// Flag for keeping track of weather a target region has been emitted. bool HasEmittedTargetRegion = false; /// Flag for keeping track of weather a device routine has been emitted. /// Device routines are specific to the bool HasEmittedDeclareTargetRegion = false; /// Loads all the offload entries information from the host IR /// metadata. void loadOffloadInfoMetadata(); /// Returns __tgt_offload_entry type. QualType getTgtOffloadEntryQTy(); /// Start scanning from statement \a S and and emit all target regions /// found along the way. /// \param S Starting statement. /// \param ParentName Name of the function declaration that is being scanned. void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName); /// Build type kmp_routine_entry_t (if not built yet). void emitKmpRoutineEntryT(QualType KmpInt32Ty); /// Returns pointer to kmpc_micro type. llvm::Type *getKmpc_MicroPointerTy(); /// Returns specified OpenMP runtime function. /// \param Function OpenMP runtime function. /// \return Specified function. llvm::FunctionCallee createRuntimeFunction(unsigned Function); /// Returns __kmpc_for_static_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_next_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_fini_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. 
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if the variable with the given name already exists) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized with a null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
                                            const llvm::Twine &Name,
                                            unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
                              llvm::Value *Ctor, llvm::Value *CopyCtor,
                              llvm::Value *Dtor, SourceLocation Loc);

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
                                llvm::Value *Handle, llvm::Value *BasePtr,
                                llvm::Value *Ptr, llvm::Value *Size,
                                llvm::Value *MapType, CharUnits ElementSize,
                                llvm::BasicBlock *ExitBB, bool IsInit);

/// Aggregated results returned by emitTaskInit.
struct TaskResultTy {
  llvm::Value *NewTask = nullptr;
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  llvm::Value *TaskDupFn = nullptr;
};

/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. 
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). 
/// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). 
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). 
/// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. 
/// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. 
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. 
/// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. 
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. 
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                           ArrayRef<const Expr *> Privates,
                           ArrayRef<const Expr *> LHSExprs,
                           ArrayRef<const Expr *> RHSExprs,
                           ArrayRef<const Expr *> ReductionOps,
                           ReductionOptionsTy Options);

/// Emit code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           ArrayRef<const Expr *> LHSExprs,
                                           ArrayRef<const Expr *> RHSExprs,
                                           const OMPTaskDataTy &Data);

/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows reuse of existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                     ReductionCodeGen &RCG, unsigned N);

/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                     llvm::Value *ReductionsPtr,
                                     LValue SharedLVal);

/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       OpenMPDirectiveKind CancelRegion);

/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                            const Expr *IfCond,
                            OpenMPDirectiveKind CancelRegion);

/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen);

/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. virtual void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. virtual bool emitTargetGlobalVariable(GlobalDecl GD); /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr); /// Registers provided target firstprivate variable as global on the /// target. llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF, const VarDecl *VD); /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. virtual bool emitTargetGlobal(GlobalDecl GD); /// Creates and returns a registration function for when at least one /// requires directives was used in the current module. 
llvm::Function *emitRequiresDirectiveRegFun(); /// Creates all the offload entries in the current compilation unit /// along with the associated metadata. void createOffloadEntriesAndInfoMetadata(); /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars); /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc); /// Struct that keeps all the relevant information that should be kept /// throughout a 'target data' region. class TargetDataInfo { /// Set to true if device pointer information have to be obtained. bool RequiresDevicePointerInfo = false; public: /// The array of base pointer passed to the runtime library. llvm::Value *BasePointersArray = nullptr; /// The array of section pointers passed to the runtime library. llvm::Value *PointersArray = nullptr; /// The array of sizes passed to the runtime library. llvm::Value *SizesArray = nullptr; /// The array of map types passed to the runtime library. llvm::Value *MapTypesArray = nullptr; /// The total number of pointers passed to the runtime library. 
unsigned NumberOfPtrs = 0u; /// Map between the a declaration of a capture and the corresponding base /// pointer address where the runtime returns the device pointers. llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap; explicit TargetDataInfo() {} explicit TargetDataInfo(bool RequiresDevicePointerInfo) : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {} /// Clear information about the data arrays. void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; NumberOfPtrs = 0u; } /// Return true if the current target data information has valid arrays. bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && NumberOfPtrs; } bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; } }; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. 
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. 
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. 
bool hasRequiresUnifiedSharedMemory() const;

/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;

/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
                                       const VarDecl *VD);

/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
///   last_iv_a = iv;
///   global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
                                                const Expr *LHS);

/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
///   lp_a = ...;
///   Fired = true;
/// }
/// if (Fired) {
///   #pragma omp critical(a)
///   if (last_iv_a <= iv) {
///     last_iv_a = iv;
///     global_a = lp_a;
///   }
///   Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                   LValue PrivLVal,
                                                   const VarDecl *VD,
                                                   SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
                 ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                 SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
                               const OMPTaskDataTy::DependData &Dependencies,
                               SourceLocation Loc);

/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                       SourceLocation Loc);

/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                      OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
                            const VarDecl *ThreadIDVar,
                            OpenMPDirectiveKind InnermostKind,
                            const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the OpenMP task directive \a D. This
  /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
  /// TaskT).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param PartIDVar Variable for partition id in the current OpenMP untied
  /// task region.
  /// \param TaskTVar Variable for task_t argument.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param Tied true if task is generated for tied task, false otherwise.
  /// \param NumberOfParts Number of parts in untied task. Ignored for tied
  /// tasks.
  ///
  llvm::Function *emitTaskOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      const VarDecl *PartIDVar, const VarDecl *TaskTVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
      bool Tied, unsigned &NumberOfParts) override;

  /// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override; /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. 
void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. 
/// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. 
/// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. 
It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. 
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. 
/// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _task_red_item_t red_data[n]; /// ... /// red_data[i].shar = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data); /// \endcode /// /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Required to resolve existing problems in the runtime. Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. 
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. 
void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. 
bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. 
/// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
mp.c
#include <stdio.h> #define Const(x,y,z) x##y##z #define float Const(un,sign,ed) float x, s[1], a[1], e[1], d[1]; float A(float a, float b){return a&b?A(a^b,(a&b)<<1):a^b;} float P(float a, float b){return a?P(a/10,'-')+putchar(a%10+48)-48:0;} float G(float a, float b){for(;b;b^=a^=b^=a%=b);return !--a;} float F(float a, float b){return b?G(a,b)+F(a,b-1):0;} float S(float a, float b){return a?a<b?0:S(A(a,1+~b),b+2):1;} int main() { *d=25; char str[10][10] = { ":Ugly", ":Good", ":Bad"}; int result[200005]; #pragma omp parallel for private(x) schedule(dynamic, 8) for( x=1; x<=200000; x++ ) { int t, X = x, sum; for( sum=0; X; X/=10 ) sum += X % 10; float y = sum; if ( x % y ) t = 0; else if ( S(F(x,x),1) ) t = 1; else t = 2; result[x] = t; } for( x=1; x<=200000; x++ ) { printf("%d", x); puts( str[result[x]] ); } printf("Who's %d?\n", (*s)[a][e][e][d]); return 0; }
Par-06-OmpParallelNestedForLoops.c
/*
 * Demonstrates two worksharing loops inside one parallel region.
 *
 * Fix: the original nested a "#pragma omp for" directly inside another
 * "#pragma omp for" bound to the same parallel region. A worksharing region
 * may not be closely nested inside another worksharing region (OpenMP spec,
 * nesting-of-regions restrictions), and the concurrent unsynchronized
 * "b[j] +=" writes were a data race. Restructured into two sibling
 * worksharing loops: the implicit barrier after the first loop guarantees
 * all of a[] is scaled before b[] is accumulated, and each thread owns a
 * distinct b[j], so there is no race. Final values are the intended ones:
 * b[j] = sum_i 3*a[i] = 30 for every j.
 */
int main(int argc, char **argv) {
    int a[4] = {1, 2, 3, 4};
    int b[4] = {0, 0, 0, 0};
    #pragma omp parallel
    {
        /* Phase 1: scale a[] — iterations split across threads. */
        #pragma omp for
        for (int i = 0; i < 4; ++i) {
            a[i] = 3 * a[i];
        }
        /* Implicit barrier here: every a[i] is updated before phase 2. */

        /* Phase 2: each thread accumulates into its own b[j] — no race. */
        #pragma omp for
        for (int j = 0; j < 4; ++j) {
            for (int i = 0; i < 4; ++i) {
                b[j] += a[i];
            }
        }
    }
    return 0;
}
BCSR.h
/**
 * @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
 * @date : 2021-04-30
 * @description: Block Compressed Row Format (BCSR) for sparse matrix.
 * @version : 1.0
 */
#ifndef PhysIKA_BCSR
#define PhysIKA_BCSR
#include <map>
#include <omp.h>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include "error.h"
#include "DEFINE_TYPE.h"

// Aligned vector of Eigen fixed-size matrices (Eigen fixed-size types need
// the aligned allocator inside std containers).
template <typename mat_type>
using VEC_MAT = std::vector<mat_type, Eigen::aligned_allocator<mat_type>>;

namespace PhysIKA {
/**
 * block compressed row format for sparse matrix, for more effective computing.
 *
 * Storage is CSR over block_size x block_size dense blocks:
 *   value_[k]  - the k-th non-zero block (row-major over block rows),
 *   index_[k]  - block-column index of value_[k],
 *   offset_[i] - index into value_/index_ of the first block of block-row i
 *                (offset_[i+1] is one past the last).
 */
template <typename T, const size_t block_size>
class BCSR
{
public:
    using ele_type = Eigen::Matrix<T, block_size, block_size>;  // one dense block
    using VEC      = Eigen::Matrix<T, -1, 1>;                   // dynamic column vector

public:
    // Empty matrix.
    BCSR()
        : rowsb_(0), colsb_(0), rows_(0), cols_(0), size_(0) {}
    // rowsb x colsb block rows/cols; scalar dims derived from block_size.
    BCSR(int rowsb, int colsb)
        : rowsb_(rowsb), colsb_(colsb), rows_(rowsb * block_size), cols_(colsb * block_size), size_(cols_ * rows_) {}
    // Deep copy (all index/value arrays are copied).
    BCSR(const BCSR<T, block_size>& other)
        : rowsb_(other.rowsb()), colsb_(other.colsb()), rows_(other.rows()), cols_(other.cols()), size_(other.size()), value_(other.get_value()), offset_(other.get_offset()), index_(other.get_index()) {}
    // BCSR(const BCSR<T, block_size>&& other)
    //   :rowsb_(other.rowsb()), colsb_(other.colsb()), rows_(other.rows()),cols_(other.cols()),size_(other.size()), value_(other.get_value()), offset_(other.get_offset()), index_(other.get_index()){}

public:
    // Build this BCSR from a row-major Eigen sparse matrix; aborts (via
    // error_msg_ext_cond) if the dims are not multiples of block_size.
    void setFromEigenMatrix(const Eigen::SparseMatrix<T, Eigen::RowMajor>& A);

    // Reset to the empty state.
    inline void clear()
    {
        value_.clear();
        offset_.clear();
        index_.clear();
        rowsb_ = colsb_ = rows_ = cols_ = size_ = 0;
    }
    inline size_t rows() const { return rows_; }    // scalar row count
    inline size_t cols() const { return cols_; }    // scalar col count
    inline size_t size() const { return size_; }    // rows_ * cols_
    inline size_t rowsb() const { return rowsb_; }  // block row count
    inline size_t colsb() const { return colsb_; }  // block col count
    // NOTE(review): these accessors return by value (full copies) — fine for
    // the copy constructor, but costly if used in hot paths.
    inline VEC_MAT<ele_type> get_value() const { return value_; }
    inline std::vector<size_t> get_offset() const { return offset_; }
    inline std::vector<size_t> get_index() const { return index_; }

    // operators override.
public:
    // Sparse matrix * dense vector (parallelized over block rows).
    VEC operator*(const VEC& rhs) const;
    // Sparse * sparse — not implemented yet (returns an empty matrix).
    BCSR<T, block_size> operator*(const BCSR<T, block_size>& rhs) const;
    // Diagonal blocks (i,i); a zero block is substituted where absent.
    inline std::vector<ele_type, Eigen::aligned_allocator<ele_type>> get_diagonal() const;

    //TODO: move this value_ to protected
    VEC_MAT<ele_type> value_;

protected:
    std::vector<size_t> offset_;
    std::vector<size_t> index_;
    // number of rows and cols per block
    size_t rowsb_, colsb_;
    // number of rows and cols for the matrix.
    size_t rows_, cols_, size_;
};

// Extract the 3x3 diagonal blocks of a row-major sparse matrix A.
template <typename T>
VEC_MAT<MAT3<T>> get_block_diagonal(const SPM_R<T>& A);
}  // namespace PhysIKA

////////////////////////////////////////////////////////////////////////
//                     template implementation                        //
////////////////////////////////////////////////////////////////////////
namespace PhysIKA {
template <typename T, const size_t block_size>
void BCSR<T, block_size>::setFromEigenMatrix(
    const Eigen::SparseMatrix<T, Eigen::RowMajor>& A)
{
    clear();
    error_msg_ext_cond(
        A.rows() % block_size != 0 || A.cols() % block_size != 0,
        "Convert Eigen Matrix to BCSR failed. Since the Matrix's dim cannot "
        "be divided by block_size. A-->(%lu, %lu) and block_size: %lu",
        A.rows(), A.cols(), block_size);
    rows_  = A.rows();
    cols_  = A.cols();
    size_  = A.size();
    rowsb_ = rows_ / block_size;
    colsb_ = cols_ / block_size;
    // very simple version.
    // Gather non-zeros into dense blocks keyed by linear block id
    // (bid = block_row * colsb_ + block_col); std::map keeps them sorted,
    // which yields CSR order below.
    std::map<
        size_t, ele_type, std::less<size_t>,
        Eigen::aligned_allocator<std::pair<const size_t, ele_type>>>
        coos;
    for (size_t i = 0; i < A.outerSize(); ++i)
    {
        for (typename Eigen::SparseMatrix<T, Eigen::RowMajor>::InnerIterator iter(A, i); iter; ++iter)
        {
            int rid = iter.row() / block_size, cid = iter.col() / block_size;
            int bid = rid * colsb_ + cid;
            if (coos.count(bid) == 0)
            {
                coos.insert({ bid, ele_type::Zero() });
            }
            // Scatter the scalar entry into its position within the block.
            coos[bid](iter.row() - rid * block_size, iter.col() - cid * block_size) = iter.value();
        }
    }
    // coo to csr
    // last_rid starts at (size_t)-1 == SIZE_MAX: a sentinel no real row id
    // can match, so the first row always pushes an offset.
    size_t num = 0, last_rid = -1;
    for (const auto& item : coos)
    {
        size_t rid = item.first / colsb_;
        size_t cid = item.first % colsb_;
        value_.emplace_back(item.second);
        index_.emplace_back(cid);
        if (rid != last_rid)
        {
            offset_.emplace_back(num);
            last_rid = rid;
        }
        num++;
    }
    offset_.emplace_back(num);
    // NOTE(review): an offset is pushed only when a block row is *non-empty*.
    // If any block row of A has no entries, offset_ ends up shorter than
    // rowsb_+1 and misaligned, so operator* / get_diagonal would index
    // out of bounds. Looks like callers guarantee no empty block rows —
    // TODO confirm, otherwise build offset_ with a counting pass + prefix sum.
}

// operators override.
// explicit name VEC
// TODO: optimize for eigen
template <typename T, const size_t block_size>
Eigen::Matrix<T, -1, 1> BCSR<T, block_size>::operator*(const VEC& rhs) const
{
    // error_msg_ext_cond(
    //     rhs.rows() != cols_, "BCSR<%lu, %lu>. rhs<%lu>. dim does not match. ",
    //     rows_, cols_, rhs.rows());
    VEC res(rows_);
    res.setZero();
    // Parallel over block rows: each iteration writes only its own segment
    // of res, so no synchronization is needed.
    // NOTE(review): an unsigned (size_t) loop variable requires OpenMP >= 3.0;
    // older toolchains (e.g. MSVC /openmp) reject it — confirm target compilers.
#pragma omp parallel for
    for (size_t i = 0; i < rowsb_; ++i)
    {
        for (size_t j = offset_[i]; j < offset_[i + 1]; ++j)
        {
            size_t k = index_[j];
            res.segment(i * block_size, block_size).noalias() += value_[j] * rhs.segment(k * block_size, block_size);
        }
    }
    // NOTE(review): returning std::move(local) inhibits NRVO (Core Guidelines
    // F.48); plain "return res;" would be at least as fast.
    return std::move(res);
}

template <typename T, const size_t block_size>
BCSR<T, block_size> BCSR<T, block_size>::operator*(const BCSR<T, block_size>& rhs) const
{
    // TODO
    return BCSR<T, block_size>();
}

// explicit name ele_type
template <typename T, const size_t block_size>
std::vector<
    Eigen::Matrix<T, block_size, block_size>,
    Eigen::aligned_allocator<Eigen::Matrix<T, block_size, block_size>>>
BCSR<T, block_size>::get_diagonal() const
{
    std::vector<ele_type, Eigen::aligned_allocator<ele_type>> block_diag;
    // very simple version.
    // For each block row, binary-search index_[offset_[i]..offset_[i+1]) for
    // block column i; push the diagonal block if found, else a zero block.
    for (size_t i = 0; i < rowsb_; ++i)
    {
        size_t lid = offset_[i], rid = offset_[i + 1];
        while (true)
        {
            size_t mid = (lid + rid) / 2;
            size_t cid = index_[mid];
            if (cid == i)
            {
                block_diag.emplace_back(value_[mid]);
                break;
            }
            else if (cid < i)
            {
                lid = mid + 1;
            }
            else
            {
                rid = mid;
            }
            // Search range exhausted: row i has no diagonal block.
            if (lid >= rid)
            {
                block_diag.emplace_back(ele_type::Zero());
                break;
            }
        }
    }
    // NOTE(review): same F.48 pessimization as operator* — prefer
    // "return block_diag;".
    return std::move(block_diag);
}

// Collect the 3x3 blocks sitting on the diagonal of A into diag_A, one scalar
// row of each block per fill_one_dim(offset) call (offset = 0,1,2).
// assumes A's rows come in groups of three per vertex — TODO confirm.
template <typename T>
VEC_MAT<MAT3<T>> get_block_diagonal(const SPM_R<T>& A)
{
    exit_if(A.rows() != A.cols(), "A should be sysmetric.");
    VEC_MAT<MAT3<T>> diag_A(A.rows() / 3, MAT3<T>::Zero());
    auto fill_one_dim = [&](const size_t offset) -> void {
#pragma omp parallel for
        for (size_t i = offset; i < A.outerSize(); i += 3)
        {
            const size_t vert_id     = i / 3;
            // first_index: column where this vertex's 3x3 diagonal block starts.
            const size_t first_index = i - offset;
            MAT3<T>&     A_i         = diag_A[vert_id];
            for (typename SPM_R<T>::InnerIterator it(A, i); it; ++it)
            {
                // Skip entries left of the diagonal block; the first entry at
                // or beyond first_index anchors the copy of up to 3 values.
                if (it.index() >= first_index)
                {
                    size_t diff = it.index() - first_index;
                    // Zero the leading columns the sparse row skipped.
                    for (size_t j = 0; j < diff; ++j)
                    {
                        A_i(offset, j) = 0.0;
                    }
                    A_i(offset, diff) = it.value();
                    const size_t left = 2 - diff;
                    bool if_advance = true;
                    // Fill the remaining columns, advancing the iterator only
                    // while it keeps matching consecutive columns; gaps become 0.
                    for (size_t j = 0; j < left; ++j)
                    {
                        if (if_advance)
                        {
                            ++it;
                            ++diff;
                        }
                        if (it && it.index() == first_index + diff)
                        {
                            A_i(offset, diff) = it.value();
                            if_advance = true;
                        }
                        else
                        {
                            A_i(offset, diff) = 0.0;
                            if_advance = false;
                        }
                    }
                    break;
                }
            }
        }
    };
    fill_one_dim(0);
    fill_one_dim(1);
    fill_one_dim(2);
    return diag_A;
}
}  // namespace PhysIKA
#endif
GB_binop__pair_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_int32) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_int32) // C+=b function (dense accum): GB (_Cdense_accumb__pair_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int32_t // A type: int32_t // A pattern? 1 // B type: int32_t // B pattern? 
1 // BinaryOp: cij = 1 #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_INT32 || GxB_NO_PAIR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict 
Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
GB_binop__ge_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int32) // A.*B function (eWiseMult): GB (_AemultB_03__ge_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int32) // A*D function (colscale): GB (_AxD__ge_int32) // D*A function (rowscale): GB (_DxB__ge_int32) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int32) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int32) // C=scalar+B GB (_bind1st__ge_int32) // C=scalar+B' GB (_bind1st_tran__ge_int32) // C=A+scalar GB (_bind2nd__ge_int32) // C=A'+scalar GB (_bind2nd_tran__ge_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT32 || GxB_NO_GE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ge_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
zpbsv.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_pbsv
 *
 *  Computes the solution to a system of linear equations A * X = B,
 *  where A is an n-by-n Hermitian positive definite band matrix, and X and B
 *  are n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
 *
 *    \f[ A = L\times L^H, \f] if uplo = PlasmaLower,
 *    or
 *    \f[ A = U^H\times U, \f] if uplo = PlasmaUpper,
 *
 *  where U is an upper triangular matrix and L is a lower triangular matrix.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The number of linear equations, i.e., the order of the matrix A.
 *          n >= 0.
 *
 * @param[in] kd
 *          The number of superdiagonals within the band of A if
 *          uplo = PlasmaUpper, or the number of subdiagonals if
 *          uplo = PlasmaLower. kd >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of columns
 *          of the matrix B. nrhs >= 0.
 *
 * @param[in,out] AB
 *          On entry, the upper or lower triangle of the Hermitian band
 *          matrix A, stored in the first KD+1 rows of the array. The
 *          j-th column of A is stored in the j-th column of the array AB
 *          as follows:
 *          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
 *          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j <= i <= min(n,j+kd).
 *          \n
 *          On exit, if INFO = 0, the triangular factor U or L from the
 *          Cholesky factorization A = U^H*U or A = L*L^H of the band
 *          matrix A, in the same storage format as A.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= kd+1.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *         positive definite, so the factorization could not
 *         be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zpbsv
 * @sa plasma_cpbsv
 * @sa plasma_dpbsv
 * @sa plasma_spbsv
 *
 ******************************************************************************/
int plasma_zpbsv(plasma_enum_t uplo, int n, int kd, int nrhs,
                 plasma_complex64_t *pAB, int ldab,
                 plasma_complex64_t *pB,  int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    // Band storage needs kd+1 rows per column.
    if (ldab < kd+1) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_pbtrf(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    // lm: rows of the tile-band layout, rounded up to whole nb tiles.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        ldb, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence.
    // The init results were previously ignored; propagate failures so the
    // async region is never entered with an invalid sequence/request.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zpbsv(uplo, AB, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_pbsv
 *
 *  Solves a Hermitian positive definite band system of linear equations
 *  using Cholesky factorization.
 *  Non-blocking tile version of plasma_zpbsv().
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
* * @param[in,out] AB * Descriptor of matrix A. * * @param[in,out] B * Descriptor of right-hand-sides B. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zpbsv * @sa plasma_omp_cpbsv * @sa plasma_omp_dpbsv * @sa plasma_omp_spbsv * ******************************************************************************/ void plasma_omp_zpbsv(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return; } if (plasma_desc_check(AB) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (AB.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_pzpbtrf(uplo, AB, sequence, request); plasma_pztbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); plasma_pztbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); }
hermm_c_dia_u_hi_col.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>

/* Dense = alpha * A * Dense + beta * Dense for a Hermitian matrix A stored
 * in DIA (diagonal) format, column-major x/y with leading dimensions
 * ldx/ldy.  Per the kernel name (hermm_c_dia_u_hi_col) this variant
 * presumably assumes a unit diagonal and that only the strictly upper
 * triangle (d > 0 diagonals) of A is referenced -- TODO confirm against the
 * dispatcher.  ONAME is the macro-generated kernel entry-point name. */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Phase 1: y = beta*y + alpha*x for every entry.  The alpha*x term is
     * the contribution of the (implied unit) diagonal of A; the strictly
     * upper triangle is added in phase 2. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT j = 0; j < columns; j++)
        for (ALPHA_INT i = 0; i < mat->rows; i++){
            /* in-place complex multiply-then-accumulate: order matters */
            alpha_mul(y[index2(j,i,ldy)],y[index2(j,i,ldy)],beta);
            alpha_madde(y[index2(j,i,ldy)],x[index2(j,i,ldx)],alpha);
        }

    /* Phase 2: accumulate the off-diagonal part.  Parallelized over output
     * columns cc, so each thread writes only its own column of y (no race). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
    {
        ALPHA_Complex* Y = &y[index2(cc,0,ldy)];
        const ALPHA_Complex* X = &x[index2(cc,0,ldx)];
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            /* only strictly super-diagonals (d > 0) are stored/used here */
            if(d > 0){
                /* first row/column touched by diagonal d, and its length */
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;   /* row index of A(ar,ac) */
                    ALPHA_INT ac = acs + i;   /* column index */
                    ALPHA_Complex val,val_c;
                    /* val   = alpha * A(ar,ac)
                     * val_c = alpha * conj(A(ar,ac)) (mirrored lower-triangle
                     * entry of the Hermitian matrix) -- presumably what
                     * alpha_mul_2c computes; verify against its definition. */
                    alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha);
                    alpha_madde(Y[ar],val,X[ac]);
                    alpha_madde(Y[ac],val_c,X[ar]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
cmontecarlo.c
#include <inttypes.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #ifdef WITHOPENMP #include <omp.h> #endif #include "io.h" #include "abbrev.h" #include "status.h" #include "rpacket.h" #include "cmontecarlo.h" /** Look for a place to insert a value in an inversely sorted float array. * * @param x an inversely (largest to lowest) sorted float array * @param x_insert a value to insert * @param imin lower bound * @param imax upper bound * * @return index of the next boundary to the left */ tardis_error_t reverse_binary_search (const double *x, double x_insert, int64_t imin, int64_t imax, int64_t * result) { /* Have in mind that *x points to a reverse sorted array. That is large values will have small indices and small ones will have large indices. */ tardis_error_t ret_val = TARDIS_ERROR_OK; if (x_insert > x[imin] || x_insert < x[imax]) { ret_val = TARDIS_ERROR_BOUNDS_ERROR; } else { int imid = (imin + imax) >> 1; while (imax - imin > 2) { if (x[imid] < x_insert) { imax = imid + 1; } else { imin = imid; } imid = (imin + imax) >> 1; } if (imax - imin == 2 && x_insert < x[imin + 1]) { *result = imin + 1; } else { *result = imin; } } return ret_val; } /** Insert a value in to an array of line frequencies * * @param nu array of line frequencies * @param nu_insert value of nu key * @param number_of_lines number of lines in the line list * * @return index of the next line ot the red. If the key value is redder than the reddest line returns number_of_lines. 
*/ tardis_error_t line_search (const double *nu, double nu_insert, int64_t number_of_lines, int64_t * result) { tardis_error_t ret_val = TARDIS_ERROR_OK; int64_t imin = 0; int64_t imax = number_of_lines - 1; if (nu_insert > nu[imin]) { *result = imin; } else if (nu_insert < nu[imax]) { *result = imax + 1; } else { ret_val = reverse_binary_search (nu, nu_insert, imin, imax, result); *result = *result + 1; } return ret_val; } tardis_error_t binary_search (const double *x, double x_insert, int64_t imin, int64_t imax, int64_t * result) { /* Have in mind that *x points to a sorted array. Like [1,2,3,4,5,...] */ int imid; tardis_error_t ret_val = TARDIS_ERROR_OK; if (x_insert < x[imin] || x_insert > x[imax]) { ret_val = TARDIS_ERROR_BOUNDS_ERROR; } else { while (imax >= imin) { imid = (imin + imax) / 2; if (x[imid] == x_insert) { *result = imid; break; } else if (x[imid] < x_insert) { imin = imid + 1; } else { imax = imid - 1; } } if (imax - imid == 2 && x_insert < x[imin + 1]) { *result = imin; } else { *result = imin; } } return ret_val; } void angle_aberration_CMF_to_LF (rpacket_t *packet, const storage_model_t *storage) { if (storage->full_relativity) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; double mu_0 = rpacket_get_mu (packet); rpacket_set_mu (packet, (mu_0 + beta) / (1.0 + beta * mu_0)); } } /** Transform the lab frame direction cosine to the CMF * * @param packet * @param storage * @param mu lab frame direction cosine * * @return CMF direction cosine */ double angle_aberration_LF_to_CMF (rpacket_t *packet, const storage_model_t *storage, double mu) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; return (mu - beta) / (1.0 - beta * mu); } double rpacket_doppler_factor (const rpacket_t *packet, const storage_model_t *storage) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; if (!storage->full_relativity) { return 1.0 - rpacket_get_mu (packet) * 
beta; } else { return (1.0 - rpacket_get_mu (packet) * beta) / sqrt (1 - beta * beta); } } double rpacket_inverse_doppler_factor (const rpacket_t *packet, const storage_model_t *storage) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; if (!storage->full_relativity) { return 1.0 / (1.0 - rpacket_get_mu (packet) * beta); } else { return (1.0 + rpacket_get_mu (packet) * beta) / sqrt (1 - beta * beta); } } double bf_cross_section (const storage_model_t * storage, int64_t continuum_id, double comov_nu) { double bf_xsect; double *x_sect = storage->photo_xsect[continuum_id]->x_sect; double *nu = storage->photo_xsect[continuum_id]->nu; switch (storage->bf_treatment) { case LIN_INTERPOLATION: { int64_t result; tardis_error_t error = binary_search (nu, comov_nu, 0, storage->photo_xsect[continuum_id]->no_of_points - 1, &result); if (error == TARDIS_ERROR_BOUNDS_ERROR) { bf_xsect = 0.0; } else { bf_xsect = x_sect[result-1] + (comov_nu - nu[result-1]) / (nu[result] - nu[result-1]) * (x_sect[result] - x_sect[result-1]); } break; } case HYDROGENIC: { double nu_ratio = nu[0] / comov_nu; bf_xsect = x_sect[0] * nu_ratio * nu_ratio * nu_ratio; break; } default: fprintf (stderr, "(%d) is not a valid bound-free cross section treatment.\n", storage->bf_treatment); exit(1); } return bf_xsect; } void calculate_chi_bf (rpacket_t * packet, storage_model_t * storage) { double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = rpacket_get_nu (packet) * doppler_factor; int64_t no_of_continuum_edges = storage->no_of_edges; int64_t current_continuum_id; line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id); rpacket_set_current_continuum_id (packet, current_continuum_id); int64_t shell_id = rpacket_get_current_shell_id (packet); double T = storage->t_electrons[shell_id]; double boltzmann_factor = exp (-(H * comov_nu) / (KB * T)); double bf_helper = 0; for(int64_t i = current_continuum_id; i < 
/* NOTE(review): this span opens inside calculate_chi_bf() — the function's
 * head (loop initialisation, bf_helper, boltzmann_factor, doppler_factor,
 * shell_id) lies outside this chunk.  The loop accumulates the bound-free
 * opacity contribution of each continuum edge, keeping a running partial
 * sum per edge for later sampling of the absorbing continuum. */
no_of_continuum_edges; i++)
    {
      // get the level population for the level ijk in the current shell:
      double l_pop = storage->l_pop[shell_id * no_of_continuum_edges + i];
      // get the level population ratio \frac{n_{0,j+1,k}}{n_{i,j,k}} \frac{n_{i,j,k}}{n_{0,j+1,k}}^{*}:
      double l_pop_r = storage->l_pop_r[shell_id * no_of_continuum_edges + i];
      double bf_x_sect = bf_cross_section (storage, i, comov_nu);
      if (bf_x_sect == 0.0)
        {
          // presumably edges beyond this one cannot contribute either
          // (edge list ordered by frequency) — TODO confirm against
          // bf_cross_section()
          break;
        }
      // stimulated-emission corrected bound-free opacity contribution,
      // converted to the lab frame via doppler_factor
      bf_helper += l_pop * bf_x_sect * (1.0 - l_pop_r * boltzmann_factor) * doppler_factor;

      // running partial sum, read back by montecarlo_bound_free_scatter()
      packet->chi_bf_tmp_partial[i] = bf_helper;
    }

  rpacket_set_chi_boundfree (packet, bf_helper);
}

/* Free-free opacity: evaluated at the comoving frequency, stored after
 * multiplication with the doppler factor. */
void
calculate_chi_ff (rpacket_t * packet, const storage_model_t * storage)
{
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  double comov_nu = rpacket_get_nu (packet) * doppler_factor;
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  double T = storage->t_electrons[shell_id];
  double boltzmann_factor = exp (-(H * comov_nu) / KB / T);
  double chi_ff_factor = storage->chi_ff_factor[shell_id];

  double chi_ff = chi_ff_factor * (1 - boltzmann_factor) * pow (comov_nu, -3);

  rpacket_set_chi_freefree (packet, chi_ff * doppler_factor);
}

/* Distance from the packet's current position to the next shell boundary,
 * and which boundary (next_shell_id = +1 outer, -1 inner) it will hit. */
void
compute_distance2boundary (rpacket_t * packet, const storage_model_t * storage)
{
  double r = rpacket_get_r (packet);
  double mu = rpacket_get_mu (packet);
  double r_outer = storage->r_outer[rpacket_get_current_shell_id (packet)];
  double r_inner = storage->r_inner[rpacket_get_current_shell_id (packet)];
  double check, distance;
  if (mu > 0.0)
    { // direction outward
      rpacket_set_next_shell_id (packet, 1);
      distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
    }
  else
    { // going inward
      if ( (check = r_inner * r_inner + (r * r * (mu * mu - 1.0)) )>= 0.0)
        { // hit inner boundary
          rpacket_set_next_shell_id (packet, -1);
          distance = - r * mu - sqrt (check);
        }
      else
        { // miss inner boundary
          rpacket_set_next_shell_id (packet, 1);
          distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
        }
    }
  rpacket_set_d_boundary (packet, distance);
}

/* Distance to the next line resonance (comoving frequency redshifts down
 * to nu_line).  Returns an error code when the comoving frequency is
 * already below the line frequency, which should not happen. */
tardis_error_t
compute_distance2line (rpacket_t * packet, const storage_model_t * storage)
{
  if (!rpacket_get_last_line (packet))
    {
      double r = rpacket_get_r (packet);
      double mu = rpacket_get_mu (packet);
      double nu = rpacket_get_nu (packet);
      double nu_line = rpacket_get_nu_line (packet);
      double distance, nu_diff;
      double ct = storage->time_explosion * C;
      double doppler_factor = rpacket_doppler_factor (packet, storage);
      double comov_nu = nu * doppler_factor;
      if ( (nu_diff = comov_nu - nu_line) >= 0)
        {
          if (!storage->full_relativity)
            {
              // first-order Doppler: distance proportional to fractional
              // frequency difference
              distance = (nu_diff / nu) * ct;
            }
          else
            {
              // fully relativistic resonance condition
              double nu_r = nu_line / nu;
              distance = - mu * r + (ct - nu_r * nu_r *
                sqrt(ct * ct - (1 + r * r * (1 - mu * mu) * (1 + pow (nu_r, -2))))) / (1 + nu_r * nu_r);
            }
          rpacket_set_d_line (packet, distance);
          return TARDIS_ERROR_OK;
        }
      else
        {
          // diagnostic dump before reporting the inconsistency
          if (rpacket_get_next_line_id (packet) == storage->no_of_lines - 1)
            {
              fprintf (stderr, "last_line = %f\n",
                       storage->line_list_nu[rpacket_get_next_line_id (packet) - 1]);
              fprintf (stderr, "Last line in line list reached!");
            }
          else if (rpacket_get_next_line_id (packet) == 0)
            {
              fprintf (stderr, "First line in line list!");
              fprintf (stderr, "next_line = %f\n",
                       storage->line_list_nu[rpacket_get_next_line_id (packet) + 1]);
            }
          else
            {
              fprintf (stderr, "last_line = %f\n",
                       storage->line_list_nu[rpacket_get_next_line_id (packet) - 1]);
              fprintf (stderr, "next_line = %f\n",
                       storage->line_list_nu[rpacket_get_next_line_id (packet) + 1]);
            }
          fprintf (stderr, "ERROR: Comoving nu less than nu_line!\n");
          fprintf (stderr, "comov_nu = %f\n", comov_nu);
          fprintf (stderr, "nu_line = %f\n", nu_line);
          fprintf (stderr, "(comov_nu - nu_line) / nu_line = %f\n",
                   (comov_nu - nu_line) / nu_line);
          fprintf (stderr, "r = %f\n", r);
          fprintf (stderr, "mu = %f\n", mu);
          fprintf (stderr, "nu = %f\n", nu);
          fprintf (stderr, "doppler_factor = %f\n", doppler_factor);
          fprintf (stderr, "cur_zone_id = %" PRIi64 "\n", rpacket_get_current_shell_id (packet));
          return TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE;
        }
    }
  else
    {
      // past the last line: no further line interaction possible
      rpacket_set_d_line (packet, MISS_DISTANCE);
      return TARDIS_ERROR_OK;
    }
}

/* Distance to the next continuum event (electron scattering, plus
 * bound-free and free-free when the continuum is switched on). */
void
compute_distance2continuum (rpacket_t * packet, storage_model_t * storage)
{
  double chi_continuum, d_continuum;
  double chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] *
    storage->sigma_thomson;
  if (storage->full_relativity)
    {
      chi_electron *= rpacket_doppler_factor (packet, storage);
    }

  if (storage->cont_status == CONTINUUM_ON)
    {
      if (packet->compute_chi_bf)
        {
          calculate_chi_bf (packet, storage);
          calculate_chi_ff (packet, storage);
        }
      else
        {
          // cached chi_bf is reused once; re-enable recomputation for the
          // next call
          packet->compute_chi_bf=true;
        }
      chi_continuum = rpacket_get_chi_boundfree (packet) +
        rpacket_get_chi_freefree (packet) + chi_electron;
      d_continuum = rpacket_get_tau_event (packet) / chi_continuum;
    }
  else
    {
      chi_continuum = chi_electron;
      d_continuum = storage->inverse_electron_densities[rpacket_get_current_shell_id (packet)] *
        storage->inverse_sigma_thomson * rpacket_get_tau_event (packet);
    }

  if (rpacket_get_virtual_packet(packet) > 0)
    {
      //Set all continuum distances to MISS_DISTANCE in case of an virtual_packet
      d_continuum = MISS_DISTANCE;
      packet->compute_chi_bf = false;
    }
  else
    {
      //        fprintf(stderr, "--------\n");
      //        fprintf(stderr, "nu = %e \n", rpacket_get_nu(packet));
      //        fprintf(stderr, "chi_electron = %e\n", chi_electron);
      //        fprintf(stderr, "chi_boundfree = %e\n", calculate_chi_bf(packet, storage));
      //        fprintf(stderr, "chi_line = %e \n", rpacket_get_tau_event(packet) / rpacket_get_d_line(packet));
      //        fprintf(stderr, "--------\n");

      //rpacket_set_chi_freefree(packet, chi_freefree);
      rpacket_set_chi_electron (packet, chi_electron);
    }
  rpacket_set_chi_continuum (packet, chi_continuum);
  rpacket_set_d_continuum (packet, d_continuum);
}

/* Macro-atom machinery: walk the transition-probability table from the
 * activation level until a deactivating transition (emit >= 0 loop exits)
 * is drawn, then dispatch on the emission type.  (Continues in the next
 * span.) */
void
macro_atom (rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
  int emit = 0, i = 0, offset = -1;
  uint64_t activate_level = rpacket_get_macro_atom_activation_level (packet);
  while (emit >= 0)
    {
      double event_random =
        rk_double (mt_state);
      // start one before the block so the do-while's pre-increment lands
      // on the first transition of this level
      i = storage->macro_block_references[activate_level] - 1;
      double p = 0.0;
      // per-shell offset into the flattened transition-probability table
      offset = storage->transition_probabilities_nd * rpacket_get_current_shell_id (packet);
      do
        {
          ++i;
          p += storage->transition_probabilities[offset + i];
        }
      while (p <= event_random);
      emit = storage->transition_type[i];
      activate_level = storage->destination_level_id[i];
    }
  switch (emit)
    {
    case BB_EMISSION:
      line_emission (packet, storage, storage->transition_line_id[i], mt_state);
      break;

    case BF_EMISSION:
      rpacket_set_current_continuum_id (packet, storage->transition_line_id[i]);
      storage->last_line_interaction_out_id[rpacket_get_id (packet)] =
        rpacket_get_current_continuum_id (packet);
      continuum_emission (packet, storage, mt_state, sample_nu_free_bound, 3);
      break;

    case FF_EMISSION:
      continuum_emission (packet, storage, mt_state, sample_nu_free_free, 4);
      break;

    case ADIABATIC_COOLING:
      storage->last_interaction_type[rpacket_get_id (packet)] = 5;
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
      break;

    default:
      fprintf (stderr, "This process for macro-atom deactivation should not exist! (emit = %d)\n", emit);
      exit(1);
    }
}

/* Advance the packet by `distance`, updating r and mu geometrically and
 * (for real packets only) accumulating the J and nu-bar estimators. */
void
move_packet (rpacket_t * packet, storage_model_t * storage, double distance)
{
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  if (distance > 0.0)
    {
      double r = rpacket_get_r (packet);
      double new_r =
        sqrt (r * r + distance * distance +
              2.0 * r * distance * rpacket_get_mu (packet));
      rpacket_set_mu (packet, (rpacket_get_mu (packet) * r + distance) / new_r);
      rpacket_set_r (packet, new_r);
      if (rpacket_get_virtual_packet (packet) <= 0)
        {
          double comov_energy = rpacket_get_energy (packet) * doppler_factor;
          double comov_nu = rpacket_get_nu (packet) * doppler_factor;
          if (storage->full_relativity)
            {
              distance *= doppler_factor;
            }
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->js[rpacket_get_current_shell_id (packet)] += comov_energy * distance;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->nubars[rpacket_get_current_shell_id (packet)] +=
            comov_energy * distance * comov_nu;

          if (storage->cont_status)
            {
              increment_continuum_estimators(packet, storage, distance, comov_nu, comov_energy);
            }
        }
    }
}

/* Accumulate the continuum (photoionisation, stimulated recombination,
 * bf/ff heating) estimators for all continua the comoving frequency can
 * ionise.  Called from move_packet() for real packets. */
void
increment_continuum_estimators (const rpacket_t * packet, storage_model_t * storage,
                                double distance, double comov_nu, double comov_energy)
{
  int64_t current_continuum_id;
  int64_t no_of_continuum_edges = storage->no_of_edges;
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  // find the bluest edge at or below comov_nu
  line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges,
              &current_continuum_id);
  double T = storage->t_electrons[shell_id];
  double boltzmann_factor = exp (-(H * comov_nu) / (KB * T));

#ifdef WITHOPENMP
#pragma omp atomic
#endif
  storage->ff_heating_estimator[shell_id] +=
    comov_energy * distance * rpacket_get_chi_freefree (packet);

  for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
    {
      double bf_xsect = bf_cross_section (storage, i, comov_nu);
      int64_t photo_ion_idx = i * storage->no_of_shells + shell_id;
      double photo_ion_estimator_helper =
        comov_energy * distance * bf_xsect / comov_nu;
      double bf_heating_estimator_helper =
        comov_energy * distance * bf_xsect * (1. - storage->continuum_list_nu[i] / comov_nu);

#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->photo_ion_estimator[photo_ion_idx] += photo_ion_estimator_helper;

#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->stim_recomb_estimator[photo_ion_idx] +=
        photo_ion_estimator_helper * boltzmann_factor;

#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->bf_heating_estimator[photo_ion_idx] += bf_heating_estimator_helper;

#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->stim_recomb_cooling_estimator[photo_ion_idx] +=
        bf_heating_estimator_helper * boltzmann_factor;

      if (photo_ion_estimator_helper != 0.0)
        {
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->photo_ion_estimator_statistics[photo_ion_idx] += 1;
        }
      else
        {
          // zero cross-section: later (bluer) edges give zero as well
          // — TODO confirm against bf_cross_section()
          break;
        }
    }
}

/* Packet energy at the interaction point, used by the j-blue and Edotlu
 * estimators.  In full relativity the energy is taken as-is (accurate up
 * to 1/gamma); otherwise it is Doppler-shifted to the interaction point. */
double
get_increment_j_blue_estimator_energy (const rpacket_t * packet,
                                       const storage_model_t * storage,
                                       double d_line)
{
  double energy;
  if (storage->full_relativity)
    {
      // Accurate up to a factor 1 / gamma
      energy = rpacket_get_energy (packet);
    }
  else
    {
      double r = rpacket_get_r (packet);
      double r_interaction = sqrt (r * r + d_line * d_line +
                                   2.0 * r * d_line * rpacket_get_mu (packet));
      double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction;
      double doppler_factor = 1.0 - mu_interaction * r_interaction *
        storage->inverse_time_explosion * INVERSE_C;
      energy = rpacket_get_energy (packet) * doppler_factor;
    }
  return energy;
}

/* Accumulate the J-blue estimator for the line the packet is about to
 * reach (no-op when the estimator array is absent). */
void
increment_j_blue_estimator (const rpacket_t * packet, storage_model_t * storage,
                            double d_line, int64_t j_blue_idx)
{
  if (storage->line_lists_j_blues != NULL)
    {
      double energy = get_increment_j_blue_estimator_energy (packet, storage, d_line);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->line_lists_j_blues[j_blue_idx] += energy / rpacket_get_nu (packet);
    }
}

/* Accumulate the Edotlu estimator (continues in the next span). */
void
increment_Edotlu_estimator (const rpacket_t * packet, storage_model_t * storage,
                            double d_line, int64_t line_idx)
{
  if (storage->line_lists_Edotlu != NULL)
    {
      double energy = get_increment_j_blue_estimator_energy (packet, storage, d_line);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->line_lists_Edotlu[line_idx] += energy;
    }
}

/* Propagate one packet.  virtual_mode == 0 runs the real-packet loop;
 * otherwise a fan of virtual packets is spawned over mu bins and each is
 * propagated with montecarlo_one_packet_loop(storage, ..., 1, ...).
 * Returns the reabsorbed flag of the (last) propagated packet. */
int64_t
montecarlo_one_packet (storage_model_t * storage, rpacket_t * packet,
                       int64_t virtual_mode, rk_state *mt_state)
{
  int64_t reabsorbed=-1;
  if (virtual_mode == 0)
    {
      reabsorbed = montecarlo_one_packet_loop (storage, packet, 0, mt_state);
    }
  else
    {
      // only spawn virtual packets whose frequency falls in the virtual
      // spectrum range
      if ((rpacket_get_nu (packet) > storage->spectrum_virt_start_nu) &&
          (rpacket_get_nu(packet) < storage->spectrum_virt_end_nu))
        {
          for (int64_t i = 0; i < rpacket_get_virtual_packet_flag (packet); i++)
            {
              double weight;
              rpacket_t virt_packet = *packet;
              double mu_min;
              if (rpacket_get_r(&virt_packet) > storage->r_inner[0])
                {
                  // smallest mu that still clears the photosphere
                  mu_min = -1.0 * sqrt (1.0 - (storage->r_inner[0] / rpacket_get_r(&virt_packet)) *
                                        (storage->r_inner[0] / rpacket_get_r(&virt_packet)));

                  if (storage->full_relativity)
                    {
                      // Need to transform the angular size of the photosphere into the CMF
                      mu_min = angle_aberration_LF_to_CMF (&virt_packet, storage, mu_min);
                    }
                }
              else
                {
                  mu_min = 0.0;
                }
              // stratified sampling of mu over the allowed range
              double mu_bin = (1.0 - mu_min) / rpacket_get_virtual_packet_flag (packet);
              rpacket_set_mu(&virt_packet,mu_min + (i + rk_double (mt_state)) * mu_bin);
              switch (virtual_mode)
                {
                case -2:
                  weight = 1.0 / rpacket_get_virtual_packet_flag (packet);
                  break;
                case -1:
                  weight = 2.0 * rpacket_get_mu(&virt_packet) /
                    rpacket_get_virtual_packet_flag (packet);
                  break;
                case 1:
                  weight = (1.0 - mu_min) / 2.0 /
                    rpacket_get_virtual_packet_flag (packet);
                  break;
                default:
                  fprintf (stderr, "Something has gone horribly wrong!\n");
                  // FIXME MR: we need to somehow signal an error here
                  // I'm adding an exit() here to inform the compiler about the impossible path
                  exit(1);
                }
              angle_aberration_CMF_to_LF (&virt_packet, storage);
              double doppler_factor_ratio =
                rpacket_doppler_factor (packet, storage) /
                rpacket_doppler_factor (&virt_packet, storage);
              rpacket_set_energy(&virt_packet,
                                 rpacket_get_energy (packet) * doppler_factor_ratio);
              rpacket_set_nu(&virt_packet,rpacket_get_nu (packet) * doppler_factor_ratio);
              reabsorbed = montecarlo_one_packet_loop (storage, &virt_packet, 1, mt_state);
#ifdef WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
#pragma omp critical
                {
#endif // WITHOPENMP
                  // grow the per-packet log arrays geometrically when full
                  if (storage->virt_packet_count >= storage->virt_array_size)
                    {
                      storage->virt_array_size *= 2;
                      storage->virt_packet_nus = safe_realloc(storage->virt_packet_nus, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_energies = safe_realloc(storage->virt_packet_energies, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_last_interaction_in_nu = safe_realloc(storage->virt_packet_last_interaction_in_nu, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_last_interaction_type = safe_realloc(storage->virt_packet_last_interaction_type, sizeof(int64_t) * storage->virt_array_size);
                      storage->virt_packet_last_line_interaction_in_id = safe_realloc(storage->virt_packet_last_line_interaction_in_id, sizeof(int64_t) * storage->virt_array_size);
                      storage->virt_packet_last_line_interaction_out_id = safe_realloc(storage->virt_packet_last_line_interaction_out_id, sizeof(int64_t) * storage->virt_array_size);
                    }
                  storage->virt_packet_nus[storage->virt_packet_count] = rpacket_get_nu(&virt_packet);
                  storage->virt_packet_energies[storage->virt_packet_count] = rpacket_get_energy(&virt_packet) * weight;
                  storage->virt_packet_last_interaction_in_nu[storage->virt_packet_count] = storage->last_interaction_in_nu[rpacket_get_id (packet)];
                  storage->virt_packet_last_interaction_type[storage->virt_packet_count] = storage->last_interaction_type[rpacket_get_id (packet)];
                  storage->virt_packet_last_line_interaction_in_id[storage->virt_packet_count] = storage->last_line_interaction_in_id[rpacket_get_id (packet)];
                  storage->virt_packet_last_line_interaction_out_id[storage->virt_packet_count] = storage->last_line_interaction_out_id[rpacket_get_id (packet)];
                  storage->virt_packet_count += 1;
#ifdef WITHOPENMP
                }
#endif // WITHOPENMP
#endif // WITH_VPACKET_LOGGING
              // bin the escaping virtual packet into the virtual spectrum
              if ((rpacket_get_nu(&virt_packet) < storage->spectrum_end_nu) &&
                  (rpacket_get_nu(&virt_packet) > storage->spectrum_start_nu))
                {
#ifdef WITHOPENMP
#pragma omp critical
                    {
#endif // WITHOPENMP
                      int64_t virt_id_nu =
                        floor ((rpacket_get_nu(&virt_packet) -
                                storage->spectrum_start_nu) / storage->spectrum_delta_nu);
                      storage->spectrum_virt_nu[virt_id_nu] +=
                        rpacket_get_energy(&virt_packet) * weight;
#ifdef WITHOPENMP
                    }
#endif // WITHOPENMP
                }
            }
        }
      else
        {
          return 1;
        }
    }
  return reabsorbed;
}

/* Handle a boundary crossing: move the packet, then either step to the
 * adjacent shell, emit it at the outer boundary, or (re)absorb / reflect
 * at the inner boundary. */
void
move_packet_across_shell_boundary (rpacket_t * packet,
                                   storage_model_t * storage, double distance,
                                   rk_state *mt_state)
{
  move_packet (packet, storage, distance);
  if (rpacket_get_virtual_packet (packet) > 0)
    {
      // virtual packets only accumulate optical depth
      double delta_tau_event = rpacket_get_chi_continuum(packet) * distance;
      rpacket_set_tau_event (packet,
                             rpacket_get_tau_event (packet) + delta_tau_event);
      packet->compute_chi_bf = true;
    }
  else
    {
      rpacket_reset_tau_event (packet, mt_state);
    }
  if ((rpacket_get_current_shell_id (packet) < storage->no_of_shells - 1
       && rpacket_get_next_shell_id (packet) == 1)
      || (rpacket_get_current_shell_id (packet) > 0
          && rpacket_get_next_shell_id (packet) == -1))
    {
      // interior crossing: step to the neighbouring shell
      rpacket_set_current_shell_id (packet,
                                    rpacket_get_current_shell_id (packet) +
                                    rpacket_get_next_shell_id (packet));
    }
  else if (rpacket_get_next_shell_id (packet) == 1)
    {
      // escaped through the outermost shell
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
    }
  else if ((storage->reflective_inner_boundary == 0) ||
           (rk_double (mt_state) > storage->inner_boundary_albedo))
    {
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
    }
  else
    {
      // reflection at the inner boundary: re-emit with a new mu
      double doppler_factor = rpacket_doppler_factor (packet, storage);
      double comov_nu = rpacket_get_nu (packet) * doppler_factor;
      double comov_energy = rpacket_get_energy (packet) * doppler_factor;
      // TODO: correct
      rpacket_set_mu (packet, rk_double (mt_state));
      double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
      rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
      rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
      if (rpacket_get_virtual_packet_flag (packet) > 0)
        {
          montecarlo_one_packet (storage, packet, -2, mt_state);
        }
    }
}

/* Thomson (electron) scattering: move to the event point, draw an
 * isotropic new direction in the CMF, and conserve comoving energy. */
void
montecarlo_thomson_scatter (rpacket_t * packet, storage_model_t * storage,
                            double distance, rk_state *mt_state)
{
  move_packet (packet, storage, distance);
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  double comov_nu = rpacket_get_nu (packet) * doppler_factor;
  double comov_energy = rpacket_get_energy (packet) * doppler_factor;
  rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
  rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
  rpacket_reset_tau_event (packet, mt_state);
  storage->last_interaction_type[rpacket_get_id (packet)] = 1;

  angle_aberration_CMF_to_LF (packet, storage);

  if (rpacket_get_virtual_packet_flag (packet) > 0)
    {
      montecarlo_one_packet (storage, packet, 1, mt_state);
    }
}

/* Bound-free absorption: sample which continuum absorbs using the partial
 * chi_bf sums, then hand the packet to the macro-atom machinery as either
 * ionisation or thermal energy. */
void
montecarlo_bound_free_scatter (rpacket_t * packet, storage_model_t * storage,
                               double distance, rk_state *mt_state)
{
  // current position in list of continuum edges -> indicates which bound-free processes are possible
  int64_t ccontinuum = rpacket_get_current_continuum_id (packet);

  // Determine in which continuum the bf-absorption occurs
  double chi_bf = rpacket_get_chi_boundfree (packet);
  double zrand = rk_double (mt_state);
  double zrand_x_chibf = zrand * chi_bf;

  while ((ccontinuum < storage->no_of_edges - 1) &&
         (packet->chi_bf_tmp_partial[ccontinuum] <= zrand_x_chibf))
    {
      ccontinuum++;
    }
  rpacket_set_current_continuum_id (packet, ccontinuum);

  /* For consistency reasons the branching between ionization and thermal energy is determined using the
     comoving frequency at the initial position instead of the frequency at the point of interaction */
  double comov_nu = rpacket_get_nu (packet) * rpacket_doppler_factor (packet, storage);

  /* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
     in the co-moving frame. */
  move_packet (packet, storage, distance);
  double old_doppler_factor = rpacket_doppler_factor (packet, storage);
  rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
  rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
  storage->last_interaction_type[rpacket_get_id (packet)] = 3; // last interaction was a bf-absorption
  storage->last_line_interaction_in_id[rpacket_get_id (packet)] = ccontinuum;

  // Convert the rpacket to thermal or ionization energy
  zrand = rk_double (mt_state);
  int64_t activate_level =
    (zrand < storage->continuum_list_nu[ccontinuum] / comov_nu) ?
    storage->cont_edge2macro_level[ccontinuum] : storage->kpacket2macro_level;
  rpacket_set_macro_atom_activation_level (packet, activate_level);
  macro_atom (packet, storage, mt_state);
}

/* Free-free absorption: the packet becomes a k-packet and is re-emitted
 * via the macro-atom machinery. */
void
montecarlo_free_free_scatter (rpacket_t * packet, storage_model_t * storage,
                              double distance, rk_state *mt_state)
{
  /* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
     in the co-moving frame. */
  move_packet (packet, storage, distance);
  double old_doppler_factor = rpacket_doppler_factor (packet, storage);
  rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
  rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
  storage->last_interaction_type[rpacket_get_id (packet)] = 4; // last interaction was a ff-absorption

  // Create a k-packet
  rpacket_set_macro_atom_activation_level (packet, storage->kpacket2macro_level);
  macro_atom (packet, storage, mt_state);
}

/* Sample a free-free emission frequency for the current shell. */
double
sample_nu_free_free (const rpacket_t * packet, const storage_model_t * storage,
                     rk_state *mt_state)
{
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  double T = storage->t_electrons[shell_id];
  double zrand = rk_double (mt_state);
  return -KB * T / H * log(zrand);  // Lucy 2003 MC II Eq.41
}

/* Sample a free-bound (recombination) emission frequency above the
 * threshold frequency of the current continuum. */
double
sample_nu_free_bound (const rpacket_t * packet, const storage_model_t * storage,
                      rk_state *mt_state)
{
  int64_t continuum_id = rpacket_get_current_continuum_id (packet);
  double th_frequency = storage->continuum_list_nu[continuum_id];
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  double T = storage->t_electrons[shell_id];
  double zrand = rk_double (mt_state);
  return th_frequency * (1 - (KB * T / H / th_frequency * log(zrand)));  // Lucy 2003 MC II Eq.26
}

/* Handle an encounter with the next line: either absorb (real packet,
 * tau_event < tau_combined) or pass through, updating estimators and the
 * next-line pointer.  (Continues in the next span.) */
void
montecarlo_line_scatter (rpacket_t * packet, storage_model_t * storage,
                         double distance, rk_state *mt_state)
{
  uint64_t next_line_id = rpacket_get_next_line_id (packet);
  uint64_t line2d_idx = next_line_id +
    storage->no_of_lines * rpacket_get_current_shell_id (packet);
  if (rpacket_get_virtual_packet (packet) == 0)
    {
      increment_j_blue_estimator (packet, storage, distance, line2d_idx);
      increment_Edotlu_estimator (packet, storage, distance, line2d_idx);
    }
  double tau_line = storage->line_lists_tau_sobolevs[line2d_idx];
  double tau_continuum =
    rpacket_get_chi_continuum(packet) * distance;
  double tau_combined = tau_line + tau_continuum;
  //rpacket_set_next_line_id (packet, rpacket_get_next_line_id (packet) + 1);

  if (next_line_id + 1 == storage->no_of_lines)
    {
      rpacket_set_last_line (packet, true);
    }
  if (rpacket_get_virtual_packet (packet) > 0)
    {
      // virtual packets just accumulate the line optical depth
      rpacket_set_tau_event (packet,
                             rpacket_get_tau_event (packet) + tau_line);
      rpacket_set_next_line_id (packet, next_line_id + 1);
      test_for_close_line (packet, storage);
    }
  else if (rpacket_get_tau_event (packet) < tau_combined)
    { // Line absorption occurs
      move_packet (packet, storage, distance);
      double old_doppler_factor = rpacket_doppler_factor (packet, storage);
      rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
      double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
      double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
      rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
      storage->last_interaction_in_nu[rpacket_get_id (packet)] =
        rpacket_get_nu (packet);
      storage->last_line_interaction_in_id[rpacket_get_id (packet)] =
        next_line_id;
      storage->last_line_interaction_shell_id[rpacket_get_id (packet)] =
        rpacket_get_current_shell_id (packet);
      storage->last_interaction_type[rpacket_get_id (packet)] = 2;
      if (storage->line_interaction_id == 0)
        {
          // simple scatter: re-emit in the same line
          line_emission (packet, storage, next_line_id, mt_state);
        }
      else if (storage->line_interaction_id >= 1)
        {
          // downbranch / macro-atom treatment
          rpacket_set_macro_atom_activation_level (packet,
                                                   storage->line2macro_level_upper[next_line_id]);
          macro_atom (packet, storage, mt_state);
        }
    }
  else
    { // Packet passes line without interacting
      rpacket_set_tau_event (packet,
                             rpacket_get_tau_event (packet) - tau_line);
      rpacket_set_next_line_id (packet, next_line_id + 1);
      packet->compute_chi_bf = false;
      test_for_close_line (packet, storage);
    }
}

/* Re-emit the packet in the given line: set the new lab-frame frequency,
 * advance the next-line pointer, and optionally spawn virtual packets. */
void
line_emission (rpacket_t * packet, storage_model_t * storage,
               int64_t emission_line_id, rk_state *mt_state)
{
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  storage->last_line_interaction_out_id[rpacket_get_id (packet)] = emission_line_id;
  if (storage->cont_status == CONTINUUM_ON)
    {
      storage->last_interaction_out_type[rpacket_get_id (packet)] = 2;
    }

  rpacket_set_nu (packet,
                  storage->line_list_nu[emission_line_id] * inverse_doppler_factor);
  rpacket_set_nu_line (packet, storage->line_list_nu[emission_line_id]);
  rpacket_set_next_line_id (packet, emission_line_id + 1);
  rpacket_reset_tau_event (packet, mt_state);

  angle_aberration_CMF_to_LF (packet, storage);

  if (rpacket_get_virtual_packet_flag (packet) > 0)
    {
      bool virtual_close_line = false;
      if (!rpacket_get_last_line (packet) &&
          fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
                rpacket_get_nu_line (packet)) <
          (rpacket_get_nu_line (packet)* 1e-7))
        {
          virtual_close_line = true;
        }
      // QUESTIONABLE!!!
      bool old_close_line = rpacket_get_close_line (packet);
      rpacket_set_close_line (packet, virtual_close_line);
      montecarlo_one_packet (storage, packet, 1, mt_state);
      rpacket_set_close_line (packet, old_close_line);
      virtual_close_line = false;
    }
  test_for_close_line (packet, storage);
}

/* Flag the packet when the next line is within a relative 1e-7 of the
 * current line frequency, so the next line distance is treated as zero. */
void
test_for_close_line (rpacket_t * packet, const storage_model_t * storage)
{
  if (!rpacket_get_last_line (packet) &&
      fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
            rpacket_get_nu_line (packet)) <
      (rpacket_get_nu_line (packet)* 1e-7))
    {
      rpacket_set_close_line (packet, true);
    }
}

/* Continuum (bf or ff) re-emission: sample a comoving frequency with the
 * supplied sampler, re-position the packet in the line list, and
 * optionally spawn virtual packets. */
void
continuum_emission (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state,
                    pt2sample_nu sample_nu_continuum, int64_t emission_type_id)
{
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  double nu_comov = sample_nu_continuum (packet, storage, mt_state);
  rpacket_set_nu (packet, nu_comov * inverse_doppler_factor);
  rpacket_reset_tau_event (packet, mt_state);

  storage->last_interaction_out_type[rpacket_get_id (packet)] = emission_type_id;

  // Have to find current position in line list
  int64_t current_line_id;
  line_search (storage->line_list_nu, nu_comov, storage->no_of_lines, &current_line_id);

  bool last_line = (current_line_id == storage->no_of_lines);
  rpacket_set_last_line (packet, last_line);
  rpacket_set_next_line_id (packet, current_line_id);

  angle_aberration_CMF_to_LF (packet, storage);

  if (rpacket_get_virtual_packet_flag (packet) > 0)
    {
      montecarlo_one_packet (storage, packet, 1, mt_state);
    }
}

/* Recompute the three candidate event distances, honouring the
 * close-line shortcut. */
static void
montecarlo_compute_distances (rpacket_t * packet, storage_model_t * storage)
{
  // Check if the last line was the same nu as the current line.
  if (rpacket_get_close_line (packet))
    {
      // If so set the distance to the line to 0.0
      rpacket_set_d_line (packet, 0.0);
      // Reset close_line.
      rpacket_set_close_line (packet, false);
    }
  else
    {
      compute_distance2boundary (packet, storage);
      compute_distance2line (packet, storage);
      // FIXME MR: return status of compute_distance2line() is ignored
      compute_distance2continuum (packet, storage);
    }
}

/* Pick the next event (line, boundary, or continuum) as the one with the
 * smallest distance; returns its handler and writes the distance. */
montecarlo_event_handler_t
get_event_handler (rpacket_t * packet, storage_model_t * storage,
                   double *distance, rk_state *mt_state)
{
  montecarlo_compute_distances (packet, storage);
  double d_boundary = rpacket_get_d_boundary (packet);
  double d_continuum = rpacket_get_d_continuum (packet);
  double d_line = rpacket_get_d_line (packet);
  montecarlo_event_handler_t handler;
  if (d_line <= d_boundary && d_line <= d_continuum)
    {
      *distance = d_line;
      handler = &montecarlo_line_scatter;
    }
  else if (d_boundary <= d_continuum)
    {
      *distance = d_boundary;
      handler = &move_packet_across_shell_boundary;
    }
  else
    {
      *distance = d_continuum;
      handler = montecarlo_continuum_event_handler (packet, storage, mt_state);
    }
  return handler;
}

/* Choose between Thomson, bound-free, and free-free handlers in
 * proportion to their opacities (continues in the next span). */
montecarlo_event_handler_t
montecarlo_continuum_event_handler (rpacket_t * packet, storage_model_t * storage,
                                    rk_state *mt_state)
{
  if (storage->cont_status)
    {
      double zrand_x_chi_cont =
        rk_double (mt_state) * rpacket_get_chi_continuum (packet);
      double chi_th = rpacket_get_chi_electron (packet);
      double chi_bf = rpacket_get_chi_boundfree (packet);

      if
        (zrand_x_chi_cont < chi_th)
        {
          return &montecarlo_thomson_scatter;
        }
      else if (zrand_x_chi_cont < chi_th + chi_bf)
        {
          return &montecarlo_bound_free_scatter;
        }
      else
        {
          return &montecarlo_free_free_scatter;
        }
    }
  else
    {
      // continuum processes disabled: only electron scattering remains
      return &montecarlo_thomson_scatter;
    }
}

/* Propagate one packet until it is emitted or reabsorbed.  For virtual
 * packets (virtual_packet > 0), tau_event accumulates instead of being
 * sampled, and the final energy is attenuated by exp(-tau).  Returns 1 on
 * reabsorption, 0 otherwise. */
int64_t
montecarlo_one_packet_loop (storage_model_t * storage, rpacket_t * packet,
                            int64_t virtual_packet, rk_state *mt_state)
{
  rpacket_set_tau_event (packet, 0.0);
  rpacket_set_nu_line (packet, 0.0);
  rpacket_set_virtual_packet (packet, virtual_packet);
  rpacket_set_status (packet, TARDIS_PACKET_STATUS_IN_PROCESS);
  // Initializing tau_event if it's a real packet.
  if (virtual_packet == 0)
    {
      rpacket_reset_tau_event (packet,mt_state);
    }
  // For a virtual packet tau_event is the sum of all the tau's that the packet passes.
  while (rpacket_get_status (packet) == TARDIS_PACKET_STATUS_IN_PROCESS)
    {
      // Check if we are at the end of line list.
      if (!rpacket_get_last_line (packet))
        {
          rpacket_set_nu_line (packet,
                               storage->line_list_nu[rpacket_get_next_line_id (packet)]);
        }
      double distance;
      get_event_handler (packet, storage, &distance, mt_state) (packet, storage,
                                                                distance, mt_state);
      // optically thick virtual packets are cut off early
      if (virtual_packet > 0 && rpacket_get_tau_event (packet) > 10.0)
        {
          rpacket_set_tau_event (packet, 100.0);
          rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
        }
    }
  if (virtual_packet > 0)
    {
      rpacket_set_energy (packet,
                          rpacket_get_energy (packet) * exp (-1.0 *
                                                             rpacket_get_tau_event (packet)));
    }
  return rpacket_get_status (packet) == TARDIS_PACKET_STATUS_REABSORBED ? 1 : 0;
}

/* Driver: propagate every packet in the storage model, optionally in
 * parallel with OpenMP, collecting output frequencies/energies and (when
 * enabled) the virtual-packet logs. */
void
montecarlo_main_loop(storage_model_t * storage, int64_t virtual_packet_flag,
                     int nthreads, unsigned long seed)
{
  int64_t finished_packets = 0;
  storage->virt_packet_count = 0;
#ifdef WITH_VPACKET_LOGGING
  storage->virt_packet_nus = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
  storage->virt_packet_energies = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
  storage->virt_packet_last_interaction_in_nu = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
  storage->virt_packet_last_interaction_type = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
  storage->virt_packet_last_line_interaction_in_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
  storage->virt_packet_last_line_interaction_out_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
  storage->virt_array_size = storage->no_of_packets;
#endif // WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
  omp_set_dynamic(0);
  if (nthreads > 0)
    {
      omp_set_num_threads(nthreads);
    }

#pragma omp parallel firstprivate(finished_packets)
  {
    // per-thread RNG, seeded deterministically from the thread id
    rk_state mt_state;
    rk_seed (seed + omp_get_thread_num(), &mt_state);
#pragma omp master
    {
      fprintf(stderr, "Running with OpenMP - %d threads\n", omp_get_num_threads());
      print_progress(0, storage->no_of_packets);
    }
#else
    rk_state mt_state;
    rk_seed (seed, &mt_state);
    fprintf(stderr, "Running without OpenMP\n");
#endif
    // scratch buffer for the per-packet partial chi_bf sums
    int64_t chi_bf_tmp_size = (storage->cont_status) ? storage->no_of_edges : 0;
    double *chi_bf_tmp_partial = safe_malloc(sizeof(double) * chi_bf_tmp_size);

#pragma omp for
    for (int64_t packet_index = 0; packet_index < storage->no_of_packets; ++packet_index)
      {
        int reabsorbed = 0;
        rpacket_t packet;
        rpacket_set_id(&packet, packet_index);
        rpacket_init(&packet, storage, packet_index, virtual_packet_flag,
                     chi_bf_tmp_partial);
        if (virtual_packet_flag > 0)
          {
            // spawn the initial fan of virtual packets before the real run
            reabsorbed = montecarlo_one_packet(storage, &packet, -1, &mt_state);
          }
        reabsorbed = montecarlo_one_packet(storage, &packet, 0, &mt_state);
        storage->output_nus[packet_index] = rpacket_get_nu(&packet);
        if (reabsorbed == 1)
          {
            // reabsorbed packets carry negative energy in the output array
            storage->output_energies[packet_index] = -rpacket_get_energy(&packet);
          }
        else
          {
            storage->output_energies[packet_index] = rpacket_get_energy(&packet);
          }
        if ( ++finished_packets%100 == 0 )
          {
#ifdef WITHOPENMP
            // WARNING: This only works with a static scheduler and gives an approximation of progress.
            // The alternative would be to have a shared variable but that could potentially decrease performance when using many threads.
            if (omp_get_thread_num() == 0 )
              print_progress(finished_packets * omp_get_num_threads(),
                             storage->no_of_packets);
#else
            print_progress(finished_packets, storage->no_of_packets);
#endif
          }
      }
    free(chi_bf_tmp_partial);
#ifdef WITHOPENMP
  }
#endif
  print_progress(storage->no_of_packets, storage->no_of_packets);
  fprintf(stderr,"\n");
}
par_smooth.h
#pragma once #include "smooth.h" #include "Defs.h" template <typename T> void par_smooth(const T* src, T* dest, int w, int h) { #pragma omp parallel for for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { dest[y * w + x] = smooth<T>(src, w, h, x, y); } } }
barrier.c
#include <stdio.h>
#include <omp.h>

/* OpenMP barrier demo: the master thread initialises the shared variable,
 * every thread waits at the explicit barrier (which also flushes the
 * write), then each thread prints the now-visible value. */
int main()
{
    int num;

#pragma omp parallel shared(num)
    {
#pragma omp master
        num = 4;

        /* no thread reads num until the master's write is complete */
#pragma omp barrier

        printf("%d\n", num);
    }
}
1498.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
/* Fills A with the deterministic pattern (i + j) / nj so runs are
 * reproducible across implementations. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        // line break every 20 elements to keep the dump readable
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* 2-D 3x3 convolution over the interior of A (borders of B are left
 * untouched), offloaded with OpenMP `target teams distribute`. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
#pragma omp target teams distribute
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          // fixed 3x3 stencil weights
          B[i][j]
            = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
            + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
            + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

/* Benchmark driver: init, time the kernel, print the live-out array so
 * dead-code elimination cannot remove the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
convolution_1x1_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(1, num_input, num_output); weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); unsigned short* g00 = weight_data_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); 
const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); g00[0] = float32_to_float16(k00[0]); g00[1] = float32_to_float16(k10[0]); g00[2] = float32_to_float16(k20[0]); g00[3] = float32_to_float16(k30[0]); g00[4] = float32_to_float16(k40[0]); g00[5] = float32_to_float16(k50[0]); g00[6] = float32_to_float16(k60[0]); g00[7] = 
float32_to_float16(k70[0]); g00 += 8; g00[0] = float32_to_float16(k01[0]); g00[1] = float32_to_float16(k11[0]); g00[2] = float32_to_float16(k21[0]); g00[3] = float32_to_float16(k31[0]); g00[4] = float32_to_float16(k41[0]); g00[5] = float32_to_float16(k51[0]); g00[6] = float32_to_float16(k61[0]); g00[7] = float32_to_float16(k71[0]); g00 += 8; g00[0] = float32_to_float16(k02[0]); g00[1] = float32_to_float16(k12[0]); g00[2] = float32_to_float16(k22[0]); g00[3] = float32_to_float16(k32[0]); g00[4] = float32_to_float16(k42[0]); g00[5] = float32_to_float16(k52[0]); g00[6] = float32_to_float16(k62[0]); g00[7] = float32_to_float16(k72[0]); g00 += 8; g00[0] = float32_to_float16(k03[0]); g00[1] = float32_to_float16(k13[0]); g00[2] = float32_to_float16(k23[0]); g00[3] = float32_to_float16(k33[0]); g00[4] = float32_to_float16(k43[0]); g00[5] = float32_to_float16(k53[0]); g00[6] = float32_to_float16(k63[0]); g00[7] = float32_to_float16(k73[0]); g00 += 8; g00[0] = float32_to_float16(k04[0]); g00[1] = float32_to_float16(k14[0]); g00[2] = float32_to_float16(k24[0]); g00[3] = float32_to_float16(k34[0]); g00[4] = float32_to_float16(k44[0]); g00[5] = float32_to_float16(k54[0]); g00[6] = float32_to_float16(k64[0]); g00[7] = float32_to_float16(k74[0]); g00 += 8; g00[0] = float32_to_float16(k05[0]); g00[1] = float32_to_float16(k15[0]); g00[2] = float32_to_float16(k25[0]); g00[3] = float32_to_float16(k35[0]); g00[4] = float32_to_float16(k45[0]); g00[5] = float32_to_float16(k55[0]); g00[6] = float32_to_float16(k65[0]); g00[7] = float32_to_float16(k75[0]); g00 += 8; g00[0] = float32_to_float16(k06[0]); g00[1] = float32_to_float16(k16[0]); g00[2] = float32_to_float16(k26[0]); g00[3] = float32_to_float16(k36[0]); g00[4] = float32_to_float16(k46[0]); g00[5] = float32_to_float16(k56[0]); g00[6] = float32_to_float16(k66[0]); g00[7] = float32_to_float16(k76[0]); g00 += 8; g00[0] = float32_to_float16(k07[0]); g00[1] = float32_to_float16(k17[0]); g00[2] = float32_to_float16(k27[0]); g00[3] = 
float32_to_float16(k37[0]); g00[4] = float32_to_float16(k47[0]); g00[5] = float32_to_float16(k57[0]); g00[6] = float32_to_float16(k67[0]); g00[7] = float32_to_float16(k77[0]); g00 += 8; } } } static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); __m256 _r8 = _mm256_loadu_ps(img0 + 64); __m256 _r9 = _mm256_loadu_ps(img0 + 72); __m256 _r10 = _mm256_loadu_ps(img0 + 80); __m256 _r11 = _mm256_loadu_ps(img0 + 88); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); _mm256_storeu_ps(tmpptr + 64, _r8); _mm256_storeu_ps(tmpptr + 72, _r9); _mm256_storeu_ps(tmpptr + 80, _r10); _mm256_storeu_ps(tmpptr + 88, _r11); tmpptr += 96; img0 += bottom_blob.cstep * 8; } } nn_size 
= (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); tmpptr += 64; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); tmpptr += 32; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 
12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); tmpptr += 16; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); _mm256_storeu_ps(tmpptr, _r0); tmpptr += 8; img0 += bottom_blob.cstep * 8; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float* outptr = out; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; __m256 _sum8 = _bias0; __m256 _sum9 = _bias0; __m256 _sum10 = _bias0; __m256 _sum11 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = 
_mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = 
_mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4); _sum4 = 
_mm256_comp_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7); _sum7 = 
_mm256_comp_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7); __m256 _val80 = _mm256_broadcast_ss(tmpptr + 64); __m256 _val81 = _mm256_broadcast_ss(tmpptr + 65); __m256 _val82 = _mm256_broadcast_ss(tmpptr + 66); __m256 _val83 = _mm256_broadcast_ss(tmpptr + 67); __m256 _val84 = _mm256_broadcast_ss(tmpptr + 68); __m256 _val85 = _mm256_broadcast_ss(tmpptr + 69); __m256 _val86 = _mm256_broadcast_ss(tmpptr + 70); __m256 _val87 = _mm256_broadcast_ss(tmpptr + 71); __m256 _val90 = _mm256_broadcast_ss(tmpptr + 72); __m256 _val91 = _mm256_broadcast_ss(tmpptr + 73); __m256 _val92 = _mm256_broadcast_ss(tmpptr + 74); __m256 _val93 = _mm256_broadcast_ss(tmpptr + 75); __m256 _val94 = _mm256_broadcast_ss(tmpptr + 76); __m256 _val95 = _mm256_broadcast_ss(tmpptr + 77); __m256 _val96 = _mm256_broadcast_ss(tmpptr + 78); __m256 _val97 = _mm256_broadcast_ss(tmpptr + 79); _sum8 = _mm256_comp_fmadd_ps(_w0, _val80, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w1, _val81, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w2, _val82, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w3, _val83, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w4, _val84, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w5, _val85, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w6, _val86, _sum8); _sum8 = _mm256_comp_fmadd_ps(_w7, _val87, _sum8); _sum9 = _mm256_comp_fmadd_ps(_w0, _val90, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w1, _val91, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w2, _val92, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w3, _val93, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w4, _val94, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w5, _val95, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w6, _val96, _sum9); _sum9 = _mm256_comp_fmadd_ps(_w7, _val97, _sum9); __m256 _val100 = _mm256_broadcast_ss(tmpptr + 80); __m256 _val101 = _mm256_broadcast_ss(tmpptr + 81); __m256 _val102 = _mm256_broadcast_ss(tmpptr + 82); __m256 _val103 = _mm256_broadcast_ss(tmpptr + 83); __m256 _val104 = _mm256_broadcast_ss(tmpptr + 84); __m256 _val105 = _mm256_broadcast_ss(tmpptr + 85); 
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86); __m256 _val107 = _mm256_broadcast_ss(tmpptr + 87); __m256 _val110 = _mm256_broadcast_ss(tmpptr + 88); __m256 _val111 = _mm256_broadcast_ss(tmpptr + 89); __m256 _val112 = _mm256_broadcast_ss(tmpptr + 90); __m256 _val113 = _mm256_broadcast_ss(tmpptr + 91); __m256 _val114 = _mm256_broadcast_ss(tmpptr + 92); __m256 _val115 = _mm256_broadcast_ss(tmpptr + 93); __m256 _val116 = _mm256_broadcast_ss(tmpptr + 94); __m256 _val117 = _mm256_broadcast_ss(tmpptr + 95); _sum10 = _mm256_comp_fmadd_ps(_w0, _val100, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w1, _val101, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w2, _val102, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w3, _val103, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w4, _val104, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w5, _val105, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w6, _val106, _sum10); _sum10 = _mm256_comp_fmadd_ps(_w7, _val107, _sum10); _sum11 = _mm256_comp_fmadd_ps(_w0, _val110, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w1, _val111, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w2, _val112, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w3, _val113, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w4, _val114, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w5, _val115, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w6, _val116, _sum11); _sum11 = _mm256_comp_fmadd_ps(_w7, _val117, _sum11); tmpptr += 96; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); _mm256_storeu_ps(outptr + 64, _sum8); _mm256_storeu_ps(outptr + 72, _sum9); _mm256_storeu_ps(outptr + 80, _sum10); _mm256_storeu_ps(outptr + 88, _sum11); outptr += 96; } for (; i + 7 < size; i += 8) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = 
_bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1); _sum1 = 
_mm256_comp_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 
_val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 
_val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7); tmpptr += 64; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); outptr += 64; } for (; i + 3 < size; i += 4) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); 
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); 
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3); tmpptr += 32; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); outptr += 32; } for (; i + 1 < size; i += 2) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 
10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1); tmpptr += 16; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); outptr += 16; } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); __m256 _sum = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val6 = 
_mm256_broadcast_ss(tmpptr + 6); // completes `__m256 _val6 = ...` begun on the previous line
            __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);

            // Load one 8x8 weight tile for this input channel; loadfp16
            // presumably widens 8 half-precision values to a __m256 of
            // fp32 -- confirm against loadfp16's definition.
            __m256 _w0 = loadfp16(kptr);
            __m256 _w1 = loadfp16(kptr + 8);
            __m256 _w2 = loadfp16(kptr + 16);
            __m256 _w3 = loadfp16(kptr + 24);
            __m256 _w4 = loadfp16(kptr + 32);
            __m256 _w5 = loadfp16(kptr + 40);
            __m256 _w6 = loadfp16(kptr + 48);
            __m256 _w7 = loadfp16(kptr + 56);

            // Accumulate the 8 input lanes against the 8 weight rows into
            // the single output vector (fused multiply-add wrapper).
            _sum = _mm256_comp_fmadd_ps(_w0, _val0, _sum);
            _sum = _mm256_comp_fmadd_ps(_w1, _val1, _sum);
            _sum = _mm256_comp_fmadd_ps(_w2, _val2, _sum);
            _sum = _mm256_comp_fmadd_ps(_w3, _val3, _sum);
            _sum = _mm256_comp_fmadd_ps(_w4, _val4, _sum);
            _sum = _mm256_comp_fmadd_ps(_w5, _val5, _sum);
            _sum = _mm256_comp_fmadd_ps(_w6, _val6, _sum);
            _sum = _mm256_comp_fmadd_ps(_w7, _val7, _sum);

            tmpptr += 8;  // advance one pack-8 input element
            kptr += 64;   // advance one 8x8 weight tile
        }

        _mm256_storeu_ps(outptr, _sum);
        outptr += 8;
    }
}
}
}

// conv1x1s2_fp16_pack8_avx: 1x1 convolution with stride 2, fp16 weights,
// pack-8 layout.  Implemented by first "shrinking" the input (gathering
// every second pixel of every second row into a dense blob) and then
// reusing the stride-1 sgemm kernel above.
static void conv1x1s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Bytes-in-floats to skip at the end of each output row: one full input
    // row (the skipped stride-2 row) plus the leftover of the current row,
    // times 8 floats per pack-8 element.
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Copy one pack-8 pixel, then skip the next (stride 2).
                __m256 _v = _mm256_loadu_ps(r0);
                _mm256_storeu_ps(outptr, _v);

                r0 += 16; // 2 pixels * 8 floats
                outptr += 8;
            }
            r0 += tailstep;
        }
    }

    // The shrunk blob is now a stride-1 problem.
    conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
simd-1.c
/* { dg-do run } */ /* { dg-additional-options "-msse2" { target sse2_runtime } } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ #define N 100 #define OFF 32 #define EPS 0.0000000000000001 #include <stdlib.h> void init(double *a, double *a_ref, double *b, double *c, int n, int ioff) { int i; for ( i = 0; i < n; i++ ) { a[i] = i*i; a_ref[i] = a[i]; b[i] = i+i; } int s = -1; for ( i = 0; i < n+ioff; i++ ) { c[i] = s*3; s = -s; } } void star( double *a, double *b, double *c, int n, int *ioff ) { int i; #pragma omp simd for ( i = 0; i < n; i++ ) a[i] *= b[i] * c[i+ *ioff]; } void star_ref( double *a, double *b, double *c, int n, int *ioff ) { int i; for ( i = 0; i < n; i++ ) a[i] *= b[i] * c[i+ *ioff]; } void check (double *a, double *b) { int i; for (i = 0; i < N; i++) if (a[i] - b[i] > EPS || b[i] - a[i] > EPS) abort (); } int main () { double a[N], a_ref[N], b[N], c[N+OFF]; int ioff = OFF; init(a, a_ref, b, c, N, ioff); star(a, b, c, N, &ioff); star_ref(a_ref, b, c, N, &ioff); check(a, a_ref); return 0; }
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/

/*
  A resize filter: a weighting function, an optional windowing function, and
  the pre-computed support/scale/blur settings that control them.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),  /* weighting function */
    (*window)(const double,const ResizeFilter *),  /* windowing function */
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;
};

/*
  Forward declarations.
*/
static double
  I0(double x),
  BesselOrderOne(double),
  Sinc(const double, const ResizeFilter *),
  SincFast(const double, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealType *FilterName(const double x,const double support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%      access support, and possibly other pre-calculated information defining
%      the functions.
%
*/

static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops.  (Uses cos(2pi x) = 2 cos^2(pi x) - 1, which gives the
    0.34 + 0.5 c + 0.16 c^2 form below.)
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cosine*(0.5+cosine*0.16));
}

static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0
    (so that we know that sin(pi x) >= 0).
  */
  const double cosine = cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}

static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is an equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return(cos((double) (MagickPI2*x)));
}

static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.
Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6     = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6     = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6     = coeff[2]
       Q0 = (      8*B +24*C )/6     = coeff[3]
       Q1 = (    -12*B -48*C )/6     = coeff[4]
       Q2 = (      6*B +30*C )/6     = coeff[5]
       Q3 = (    - 1*B - 6*C )/6     = coeff[6]

    which are used to define the filter:

       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  /* P1 is always zero, so the first piece has no linear term. */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /*
    Unified 2-, 3- and 4-lobe piecewise cubic spline filter; the lobe count
    is selected by the configured support size.
  */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}

static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is
    used as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is needed; the others are informative
    only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
      .54 + .46 cos(pi x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  if (x == 0.0)
    return(0.5*MagickPI);  /* limit of J1(pi x)/x as x -> 0 */
  return(BesselOrderOne(MagickPI*x)/x);
}

static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing):

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
However it is typically defined in terms of Alpha*PI

    The normalization factor (coeff[1]) is not actually needed, but without
    it the filter has a large value at x=0 making it difficult to compare
    the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc:  N is the 'order' of the
    lagrange function and depends on the overall support window size of the
    filter.  That is: for a support of 2, it gives a lagrange-4 (piecewise
    cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);  /* which piece x falls in */
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);  /* Lagrange basis product, skipping term n */
  return(value);
}

static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
*/
  if (x != 0.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  return((double) 1.0);  /* sinc(0) == 1, by the limit */
}

static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials / Pade approximants, the approximations are computed with a
    completely different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops).  More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - *
    / budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error
    over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The accuracy of the approximation is matched to the quantum depth so
      no more flops are spent than the pixel precision can benefit from.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* The (xx-k^2) factors pin the zeros of sinc at x = 1,2,3,4. */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    /* Rational (quotient) form for the highest precision. */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RobidouxSharp is a slightly sharper version of Robidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Robidoux and % RobidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. 
% % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. 
% % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterType filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image, const FilterType filter,const MagickBooleanType cylindrical, ExceptionInfo *exception) { const char *artifact; FilterType filter_type, window_type; double B, C, value; ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterType enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, WelchFilter }, /* 
Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 
1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. 
*/ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) exception; resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter)); (void) memset(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur=1.0; /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) && (filter != SincFastFilter)) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (IsStringTrue(artifact) != MagickFalse) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterType) option; window_type=BoxFilter; } /* Filter override with a specific window function. 
*/ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterType) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type= cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterType) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType; resize_filter->window=filters[window_type].function; resize_filter->windowWeightingType=filters[window_type].weightingFunctionType; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickCoreSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(double) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; /* case 
LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */ if ( value > 0.5 ) resize_filter->support *= 2*value; /* increase support linearly */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL)*MagickPI; /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(double) lobes; } if (resize_filter->filter == Jinc) { /* Convert a Jinc function lobes value to a real support 
value. */ if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long) resize_filter->support)-1]; /* Blur this filter so support is a integer value (lobes dependant). */ if (filter_type == LanczosRadiusFilter) resize_filter->blur*=floor(resize_filter->support)/ resize_filter->support; } /* Expert blur override. */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(double) MagickEpsilon; /* Expert override of the support setting. */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support); /* Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. 
*/ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } { const double twoB = B+B; /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout, "# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(),(double) resize_filter->blur); if ((filter_type == GaussianFilter) || (window_type == GaussianFilter)) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double) support); if ((filter_type == CubicFilter) || (window_type == CubicFilter)) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x, GetMagickPrecision(),(double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
*/ (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support, GetMagickPrecision(),0.0); } /* Output the above once only for each image - remove setting */ (void) DeleteImageArtifact((Image *) image,"filter:verbose"); #if defined(MAGICKCORE_OPENMP_SUPPORT) } #endif return(resize_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveResizeImage() adaptively resize image with pixel resampling. % % This is shortcut function for a fast interpolative resize using mesh % interpolation. It works well for small resizes of less than +/- 50% % of the original image size. For larger resizing on images a full % filtered and slower resize function should be used instead. % % The format of the AdaptiveResizeImage method is: % % Image *AdaptiveResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveResizeImage(const Image *image, const size_t columns,const size_t rows,ExceptionInfo *exception) { Image *resize_image; resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel, exception); return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + B e s s e l O r d e r O n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BesselOrderOne() computes the Bessel function of x of the first kind of % order 0. This is used to create the Jinc() filter function below. 
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%     cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             = 1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

#undef I0
static double I0(double x)
{
  double
    sum,
    t,
    y;

  ssize_t
    i;

  /*
    Zeroth order modified Bessel function of the first kind, evaluated by
    its power series sum(k>=0) (x^2/4)^k/(k!)^2; terms are accumulated until
    they drop below MagickEpsilon.  Used to normalize the Kaiser window.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

#undef J1
static double J1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Rational (8,8) minimax approximation for j1(x)/x on (0,8]; evaluated
    with Horner's rule in x*x.  Coefficient tables are fixed published
    constants -- do not modify.
  */
  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static double P1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Rational approximation for the asymptotic modulus term p1(x) on (8,inf);
    evaluated in powers of (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static double Q1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Rational approximation for the asymptotic phase term q1(x) on (8,inf);
    evaluated in powers of (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static double BesselOrderOne(double x)
{
  double
    p,
    q;

  /*
    Bessel function of the first kind of order one, J1(x).  Odd symmetry
    (j1(-x) = -j1(x)) is handled by saving the signed argument in p and
    restoring the sign at the end.  For |x| < 8 use the direct rational
    approximation x*J1(x); otherwise use the asymptotic expansion built
    from P1() and Q1().
  */
  if (x == 0.0)
    return(0.0);
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)-
    cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroys the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature before freeing so a stale pointer is caught by
    the signature asserts in the other accessors.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() returns the current support window size for this
%  filter.  Note that this may have been enlarged by the filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Returns the per-filter coefficient array (e.g. cubic B,C polynomial
   terms, Gaussian sigma, Kaiser beta); see AcquireResizeFilter(). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Returns the blur (support scaling) factor of the filter. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Returns the windowing-function scale factor. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Returns the support of the windowing function (may differ from the
   weighting-function support via the filter:win-support override). */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Returns the enum identifying the weighting function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Returns the enum identifying the windowing function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Practical support: declared support scaled by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
*/ assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickCoreSignature); x_blur=fabs((double) x)*PerceptibleReciprocal(resize_filter->blur); /* X offset with blur scaling */ if ((resize_filter->window_support < MagickEpsilon) || (resize_filter->window == Box)) scale=1.0; /* Point or Box Filter -- avoid division by zero */ else { scale=resize_filter->scale; scale=resize_filter->window(x_blur*scale,resize_filter); } weight=scale*resize_filter->filter(x_blur,resize_filter); return(weight); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolativeResizeImage() resizes an image using the specified % interpolation method. % % The format of the InterpolativeResizeImage method is: % % Image *InterpolativeResizeImage(const Image *image,const size_t columns, % const size_t rows,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *InterpolativeResizeImage(const Image *image, const size_t columns,const size_t rows,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define InterpolativeResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; Image *resize_image; MagickBooleanType status; MagickOffsetType progress; PointInfo scale; ssize_t y; /* Interpolatively resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); scale.x=(double) image->columns/resize_image->columns; scale.y=(double) image->rows/resize_image->rows; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { PointInfo offset; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if (q == (Quantum *) NULL) continue; offset.y=((double) y+0.5)*scale.y-0.5; for (x=0; x < (ssize_t) resize_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait resize_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; offset.x=((double) x+0.5)*scale.x-0.5; 
status=InterpolatePixelChannels(image,image_view,resize_image,method, offset.x,offset.y,q,exception); if (status == MagickFalse) break; } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) resize_image=DestroyImage(resize_image); return(resize_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image,const size_t columns, % const size_t rows,const double delta_x,const double rigidity, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns, const size_t rows,const double delta_x,const double rigidity, ExceptionInfo *exception) { #define LiquidRescaleImageTag "Rescale/Image" CacheView *image_view, *rescale_view; gfloat *packet, *pixels; Image *rescale_image; int x_offset, y_offset; LqrCarver *carver; LqrRetVal lqr_status; MagickBooleanType status; MemoryInfo *pixel_info; gfloat *q; ssize_t y; /* Liquid rescale image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); if ((columns <= 2) || (rows <= 2)) return(ResizeImage(image,columns,rows,image->filter,exception)); pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels* sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) return((Image *) NULL); pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info); status=MagickTrue; q=pixels; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) *q++=QuantumScale*p[i]; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows, (int) GetPixelChannels(image),LQR_COLDEPTH_32F); if (carver == (LqrCarver *) NULL) { 
pixel_info=RelinquishVirtualMemory(pixel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } lqr_carver_set_preserve_input_image(carver); lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity); lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows); (void) lqr_status; rescale_image=CloneImage(image,lqr_carver_get_width(carver), lqr_carver_get_height(carver),MagickTrue,exception); if (rescale_image == (Image *) NULL) { pixel_info=RelinquishVirtualMemory(pixel_info); return((Image *) NULL); } if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse) { pixel_info=RelinquishVirtualMemory(pixel_info); rescale_image=DestroyImage(rescale_image); return((Image *) NULL); } rescale_view=AcquireAuthenticCacheView(rescale_image,exception); (void) lqr_carver_scan_reset(carver); while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0) { Quantum *magick_restrict p; ssize_t i; p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1, exception); if (p == (Quantum *) NULL) break; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait rescale_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); rescale_traits=GetPixelChannelTraits(rescale_image,channel); if ((traits == UndefinedPixelTrait) || (rescale_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange* packet[i]),p); } if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse) break; } rescale_view=DestroyCacheView(rescale_view); pixel_info=RelinquishVirtualMemory(pixel_info); lqr_carver_destroy(carver); return(rescale_image); } #else MagickExport Image *LiquidRescaleImage(const Image *image, const size_t magick_unused(columns),const size_t magick_unused(rows), const double magick_unused(delta_x),const double magick_unused(rigidity), ExceptionInfo *exception) { assert(image != (const Image *) 
NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Built without liblqr: report the missing delegate and fail. */
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Copy all channel values of one pixel to another; offsets are in pixels,
   converted to Quantum indices via the channel count. */
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
    destination[channels*destination_offset+i]=source[source_offset*channels+i];
}

/* Write the per-channel integer average of source_size source pixels
   (selected by the source_offset index array) to the destination pixel. */
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    sum;

  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
  {
    ssize_t
      j;

    sum=0;
    for (j=0; j < (ssize_t) source_size; j++)
      sum+=source[source_offset[j]*channels+i];
    /* Integer division truncates; matches the pixel-art scaler's behavior. */
    destination[channels*destination_offset+i]=(Quantum) (sum/source_size);
  }
}

/* Convenience wrapper: blend exactly two source pixels via MixPixels(). */
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };
MixPixels(source,offsets,2,destination,destination_offset,channels);
}

/*
  Return 1 if the two pixels match on every channel, 0 otherwise.
  Offsets are given in whole pixels and scaled to channel indices here.
*/
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  ssize_t
    i;

  offset1*=channels;
  offset2*=channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (source1[offset1+i] != source2[offset2+i])
      return(0);
  return(1);
}

/*
  Eagle 2x kernel.  pixels[] is a 3x3 neighborhood in row-major order
  (index 4 is the center); result[] is the 2x2 output, row-major.
  Each output corner copies the matching source corner when the two
  neighbors flanking that corner agree, otherwise the center.
*/
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  /* default: replicate the center pixel into all four output slots */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}

/*
  Apply one hq2x blending rule (0-19, from Hq2XTable) to produce a single
  output pixel.  e is the center pixel; a,b,d,f,h are neighbor indices
  (the caller rotates them so each output corner reuses the same rules).
*/
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
/* blend 4 pixels with implicit weights (repeats act as weights) */
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
\
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
/* blend 8 pixels with implicit weights */
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
\
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      /* 14/16 center, 1/16 each of d and b */
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    case 12:
    {
      /* rules 12-17 blend only when b and d match */
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      /* rule 19 */
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}

/*
  Pack an 8-element 0/1 difference pattern into an integer 0-255;
  pattern[0] becomes the most significant bit.
*/
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    i;

  unsigned int
    result,
    order;

  result=0;
  order=1;
  for (i=7; i >= 0; i--)
  {
    result+=order*pattern[i];
    order*=2;
  }
  return(result);
}

/*
  hq2x kernel.  pixels[] is a 3x3 neighborhood (center = 4); result[] is
  the 2x2 output, row-major.  Each of the 256 neighbor-difference
  patterns maps through Hq2XTable to a blending rule for Hq2XHelper; the
  pattern is rotated so each output corner reuses the same table.
*/
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  static const unsigned int
    Hq2XTable[] =
  {
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
    4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
    4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
    4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
    4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
    4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
    4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
  };

  /* 1 where a neighbor differs from the center, scanned 8..0 */
  const int
    pattern1[] =
  {
    !PixelsEqual(pixels,4,pixels,8,channels),
    !PixelsEqual(pixels,4,pixels,7,channels),
    !PixelsEqual(pixels,4,pixels,6,channels),
    !PixelsEqual(pixels,4,pixels,5,channels),
    !PixelsEqual(pixels,4,pixels,3,channels),
    !PixelsEqual(pixels,4,pixels,2,channels),
    !PixelsEqual(pixels,4,pixels,1,channels),
    !PixelsEqual(pixels,4,pixels,0,channels)
  };

/* 90-degree rotation of the difference pattern */
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int
    pattern2[] = { Rotated(pattern1) };

  const int
    pattern3[] = { Rotated(pattern2) };

  const int
    pattern4[] = { Rotated(pattern3) };
#undef Rotated

  (void) source;
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}

/*
  Fish 2x kernel.  pixels[] is a 3x3 neighborhood (center = 4); result[]
  is the 2x2 output, row-major.  Outputs 0-2 are picked by intensity
  comparisons; output 3 (bottom-right) is blended from the 2x2 block
  {0,1,3,4} according to which of its pixel pairs match.
*/
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
/* blend three pixels for a corner, biased toward the brighter side */
#define Corner(A,B,C,D) \
{ \
  if (intensities[B] > intensities[A]) \
    { \
      const ssize_t \
        offsets[3] = { B, C, D }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
  else \
    { \
      const ssize_t \
        offsets[3] = { A, B, C }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
}
/* blend two pixels along an edge, biased toward the brighter side */
#define Line(A,B,C,D) \
{ \
  if (intensities[C] > intensities[A]) \
    Mix2Pixels(pixels,C,D,result,3,channels); \
  else \
    Mix2Pixels(pixels,A,B,result,3,channels); \
}

  const ssize_t
    pixels_offsets[4] = { 0, 1, 3, 4 };

  MagickFloatType
    intensities[9];

  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;

  ssize_t
    i;

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  CopyPixels(pixels,0,result,0,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ?
0 : 3),result, 2,channels);
  /* pairwise equality within the 2x2 block {0,1,3,4} */
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  if (ae && bd && ab)
    {
      /* all four agree: copy straight through */
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* no structure detected: plain average of the 2x2 block */
  MixPixels(pixels,pixels_offsets,4,result,3,channels);
#undef Corner
#undef Line
}

/*
  xBR 2x kernel.  pixels[] is a 5x5 neighborhood in row-major order
  (center = 12); result[] is the 2x2 output, row-major.  Each output
  corner compares two weighted edge sums and, when an edge is detected,
  blends the center with its nearer matching neighbor.
*/
static void Xbr2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
/* w_M_N is 0 when pixels M and N are equal, 1 otherwise */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;

  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar

  magick_unreferenced(source);
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}

/*
  Scale2X (EPX) kernel.  pixels[] is a 3x3 neighborhood (center = 4);
  result[] is the 2x2 output, row-major.  If the vertical or horizontal
  neighbor pairs match, the whole block is just the center replicated.
*/
static void Scale2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  magick_unreferenced(source);
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      ssize_t
        i;

      for (i=0; i < 4; i++)
        CopyPixels(pixels,4,result,i,channels);
      return;
    }
  if (PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,3,result,0,channels);
  else
    CopyPixels(pixels,4,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,5,channels))
    CopyPixels(pixels,5,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,7,channels))
    CopyPixels(pixels,3,result,2,channels);
  else
    CopyPixels(pixels,4,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,7,channels))
    CopyPixels(pixels,5,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
}

/*
  Eric's Pixel Expansion (EPX-B) 2x kernel.  pixels[] is a 3x3
  neighborhood (center = 4); result[] is the 2x2 output, row-major.
  Starts from the replicated center, then softens detected edges by
  blending matching diagonal neighbor pairs.
*/
static void Epbx2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
/* edge test: pixels a and b match and a supporting neighbor agrees */
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
  ) \
)

  ssize_t
    i;

  magick_unreferenced(source);
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);
    }
#undef HelperCond
}

/*
  Eagle 3x kernel.  pixels[] is a 3x3 neighborhood (center = 4);
  result[] is the 3x3 output, row-major.  Corners copy matching source
  corners, edges blend two agreeing corners, center stays the center.
*/
static inline void Eagle3X(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  magick_unreferenced(source);
  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  if (corner_tl && corner_tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  if (corner_tl && corner_bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  if (corner_tr && corner_br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  else
    CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  if (corner_bl && corner_br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  else
    CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ? 5 : 4),result,8,channels);
}

/*
  Eagle 3xB kernel: as Eagle3X but edges always keep the center pixel;
  only the four corners are eligible for replacement.
*/
static inline void Eagle3XB(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  magick_unreferenced(source);
  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ?
5 : 4),result,8,channels);
}

/*
  Scale3X kernel.  pixels[] is a 3x3 neighborhood (center = 4);
  result[] is the 3x3 output, row-major.  Only expands detail when the
  vertical and horizontal neighbor pairs both differ.
*/
static inline void Scale3X(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  magick_unreferenced(source);
  if (!PixelsEqual(pixels,1,pixels,7,channels) &&
      !PixelsEqual(pixels,3,pixels,5,channels))
    {
      if (PixelsEqual(pixels,3,pixels,1,channels))
        CopyPixels(pixels,3,result,0,channels);
      else
        CopyPixels(pixels,4,result,0,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,1,result,1,channels);
      else
        CopyPixels(pixels,4,result,1,channels);
      if (PixelsEqual(pixels,5,pixels,1,channels))
        CopyPixels(pixels,5,result,2,channels);
      else
        CopyPixels(pixels,4,result,2,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        ) ||
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,3,result,3,channels);
      else
        CopyPixels(pixels,4,result,3,channels);
      CopyPixels(pixels,4,result,4,channels);
      if (
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        )
      )
        CopyPixels(pixels,5,result,5,channels);
      else
        CopyPixels(pixels,4,result,5,channels);
      if (PixelsEqual(pixels,3,pixels,7,channels))
        CopyPixels(pixels,3,result,6,channels);
      else
        CopyPixels(pixels,4,result,6,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        )
      )
        CopyPixels(pixels,7,result,7,channels);
      else
        CopyPixels(pixels,4,result,7,channels);
      if (PixelsEqual(pixels,5,pixels,7,channels))
        CopyPixels(pixels,5,result,8,channels);
      else
        CopyPixels(pixels,4,result,8,channels);
    }
  else
    {
      ssize_t
        i;

      /* degenerate neighborhood: replicate the center */
      for (i=0; i < 9; i++)
CopyPixels(pixels,4,result,i,channels);
    }
}

/*
  Magnify image by an integer factor (2x or 3x) using the pixel-art
  scaling kernel selected by the "magnify:method" image option
  (default "scale2x").  Returns a new image or NULL on failure.
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  const char
    *option;

  Image
    *source_image,
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    rectangle;

  ssize_t
    y;

  unsigned char
    magnification,  /* output scale factor: 1, 2 or 3 */
    width;          /* side of the source neighborhood read per pixel */

  void
    (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t);

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  option=GetImageOption(image->image_info,"magnify:method");
  if (option == (char *) NULL)
    option="scale2x";
  /*
    NOTE(review): if the option value matches none of the cases below,
    magnification stays 1 and width stays 1 while scaling_method remains
    Scale2X, which reads a 3x3 neighborhood from a 1x1 virtual-pixel
    region -- confirm intended behavior for unknown method names.
  */
  scaling_method=Scale2X;
  magnification=1;
  width=1;
  switch (*option)
  {
    case 'e':
    {
      if (LocaleCompare(option,"eagle2x") == 0)
        {
          scaling_method=Eagle2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3x") == 0)
        {
          scaling_method=Eagle3X;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3xb") == 0)
        {
          scaling_method=Eagle3XB;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"epbx2x") == 0)
        {
          scaling_method=Epbx2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'f':
    {
      if (LocaleCompare(option,"fish2x") == 0)
        {
          scaling_method=Fish2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare(option,"hq2x") == 0)
        {
          scaling_method=Hq2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare(option,"scale2x") == 0)
        {
          scaling_method=Scale2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"scale3x") == 0)
        {
          scaling_method=Scale3X;
          magnification=3;
          width=3;
          break;
        }
      break;
    }
    case 'x':
    {
      if (LocaleCompare(option,"xbr2x") == 0)
        {
          scaling_method=Xbr2X;
          magnification=2;
          width=5;  /* xBR needs a 5x5 neighborhood */
        }
      break;
    }
    default:
      break;
  }
  /*
    Make a working copy of the source image and convert it to RGB colorspace.
  */
  source_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=0;
  offset.y=0;
  rectangle.x=0;
  rectangle.y=0;
  rectangle.width=image->columns;
  rectangle.height=image->rows;
  (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception);
  (void) SetImageColorspace(source_image,RGBColorspace,exception);
  magnify_image=CloneImage(source_image,magnification*source_image->columns,
    magnification*source_image->rows,MagickTrue,exception);
  if (magnify_image == (Image *) NULL)
    {
      source_image=DestroyImage(source_image);
      return((Image *) NULL);
    }
  /*
    Magnify the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(source_image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,magnify_image,source_image->rows,1)
#endif
  for (y=0; y < (ssize_t) source_image->rows; y++)
  {
    Quantum
      r[128]; /* to hold result pixels */

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y,
      magnify_image->columns,magnification,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) source_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      size_t
        channels;

      ssize_t
        i;

      ssize_t
        j;

      /* width x width neighborhood centered on (x,y); the virtual
         cache view supplies edge pixels outside the image */
      p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width,
        exception);
      /* NOTE(review): p is not checked for NULL before use -- confirm */
      channels=GetPixelChannels(source_image);
      scaling_method(source_image,p,r,channels);
      /*
        Copy the result pixels into the final image.
      */
      for (j=0; j < (ssize_t) magnification; j++)
        for (i=0; i < (ssize_t) (channels*magnification); i++)
          q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i];
      q+=magnification*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  source_image=DestroyImage(source_image);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally to
%  half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* half size in each dimension; Spline filter smooths the reduction */
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterType filter,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* new geometry = old geometry scaled by resolution ratio, rounded;
     a zero stored resolution falls back to DefaultResolution */
  width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
    DefaultResolution : image->resolution.x)+0.5);
  height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
    DefaultResolution : image->resolution.y)+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
%        const FilterType filter,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One filter tap: the weight applied to a given source pixel index.
*/
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release the per-thread contribution buffers; returns NULL.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one aligned contribution buffer of `count` taps per worker
  thread.  Returns NULL on allocation failure (partially-built sets are
  torn down first).
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  Resize horizontally: for each destination column, accumulate the
  weighted sum of the source columns under the filter support (alpha-
  weighted when the channel blends).  progress/span drive the progress
  monitor; returns MagickFalse on failure.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* center of this destination column in source coordinates */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* copy-only channel or masked pixel: take nearest tap */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

/*
  Resize vertically: mirror of HorizontalFilter, iterating destination
  rows and accumulating weighted source rows.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
*/
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* center of this destination row in source coordinates */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* copy-only channel or masked pixel: take nearest tap */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* alpha blending: weights are scaled by each tap's alpha */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(double) columns/(double) image->columns; y_factor=(double) rows/(double) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->alpha_trait != UndefinedPixelTrait) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; ssize_t x1; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if 
(status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; double alpha, pixel[CompositePixelChannel], *scale_scanline, *scanline, *x_vector, *y_vector; Image *scale_image; MagickBooleanType next_column, next_row, proceed, status; PixelTrait scale_traits; PointInfo scale, span; ssize_t i; ssize_t n, number_rows, y; /* Initialize scaled image attributes. 
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row counts match, scanline aliases x_vector
    (no separate buffer is needed); otherwise scanline gets its own
    allocation and is released separately below.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  Rows are box-filtered in the Y direction first (accumulated
    into y_vector/scanline), then each scanline is box-filtered in the X
    direction into scale_scanline.  span/scale track the fractional coverage
    of source rows/columns against destination cells.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.  Channels with BlendPixelTrait are stored
          premultiplied by alpha.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction.  Accumulate whole source rows into y_vector
          while they fit entirely inside the current destination row.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) &&
            (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.  This row straddles the destination-row
              boundary; only its span.y fraction belongs to this output row.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the accumulated destination row and reset y_vector for the
          next one.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply blended channels by the stored alpha. */
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;

        /*
          Scale X direction.  t indexes destination columns; pixel[]
          accumulates fractional source-column coverage per channel.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        /*
          Flush the final, partially-covered destination column.
        */
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) &&
            (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply blended channels by the stored alpha. */
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.  scanline is released only when it is a distinct
    buffer (rows differ); otherwise it aliases x_vector.
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.  The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *linear_image,
    *thumbnail_image;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Resize in linear RGB with a sharpened Lanczos filter.
  */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  if (linear_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageColorspace(linear_image,RGBColorspace,exception);
  linear_image->filter=LanczosSharpFilter;
  thumbnail_image=DistortResizeImage(linear_image,columns,rows,exception);
  linear_image=DestroyImage(linear_image);
  if (thumbnail_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set sRGB colorspace and remove color profiles (except ICC/ICM) and
    comments to keep the thumbnail small.
  */
  (void) SetImageColorspace(thumbnail_image,sRGBColorspace,exception);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        /* Deleting invalidates the iterator; restart the walk. */
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Set Thumb properties (freedesktop.org thumbnail metadata).
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  /*
    FIX: only read attributes.st_mtime when GetPathAttributes() succeeded.
    The original additionally formatted st_mtime into `value` outside this
    guard — an uninitialized read when the stat failed, and dead code in any
    case since FormatMagickSize() below immediately overwrites `value`.
    That stray FormatLocaleString() call has been removed.
  */
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",(double)
      attributes.st_mtime);
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. 
IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. 
/// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. 
/// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. 
// /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. 
/// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) --BracketCount; // Don't let unbalanced ]'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) --BraceCount; // Don't let unbalanced }'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// \brief Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. 
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}

/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
  tok::TokenKind Kind = Tok.getKind();
  return Kind == tok::eof || Kind == tok::annot_module_begin ||
         Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}

/// \brief Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;

/// \brief Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;

/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();

/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();

void HandlePragmaMSPointersToMembers();

void HandlePragmaMSVtorDisp();

void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
// Store a parsed type into an annotation token.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}

/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

private:
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
                std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  // Fast path: only 'vector', 'bool' and (AltiVec-only) 'pixel' are
  // context-sensitive; anything else is left untouched.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC1);
  if (Tok.isAnnotation())
    return false;
  // Lazily look up the identifier the first time it is needed.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit.  This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction".
/// It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot all of the parser state that may change during the
    // tentative parse, then arm preprocessor backtracking.
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Restore every piece of state snapshotted in the constructor.
    P.PP.Backtrack();
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};

/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Null out Self so Exit()/~ParseScope do nothing.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
// Convenience overload: diagnose at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// \brief Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
// The general form; the overloads above all funnel into this one.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','.  This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// \brief The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// \brief Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// \brief Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// \brief Whether this class is an __interface.
  bool IsInterface : 1;

  /// \brief The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// \brief Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// \brief The kind of template we are parsing.
  enum {
    /// \brief We are not parsing a template at all.
    NonTemplate = 0,
    /// \brief We are parsing a template declaration.
    Template,
    /// \brief We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// \brief We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// \brief The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// \brief The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// \brief The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// \brief Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);

Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate,
                                         bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   AttributeList *AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers& VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Convenience overload: skip until a single token kind T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs,
    ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
          const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
          LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// Bookkeeping for an Objective-C @implementation currently being parsed;
/// registers itself as Parser::CurParsedObjCImpl in its constructor.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished; // initialized false in the constructor; read by isFinished()
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-C context-sensitive keywords recognition.
/// Recognized Objective-C context-sensitive type qualifiers
/// (parameter-passing and nullability spellings). objc_NumQuals is the
/// count sentinel, not a real qualifier.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout,
  objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};

/// Cached identifiers for the qualifiers above, indexed by ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
                          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
                          bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstraintExpression();
  // Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                        prec::Level MinPrec);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand,
                                 bool &NotCastExpr,
                                 TypeCastState isTypeCast,
                                 bool isVectorLiteral = false);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast,
                                 bool isVectorLiteral = false);

  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
  bool isPostfixExpressionSuffixStart() {
    // One of: subscript, call, member access, or ++/-- suffix.
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren ||
            K == tok::period || K == tok::arrow ||
            K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                bool &isCastExpr,
                                                ParsedType &CastTy,
                                                SourceRange &CastRange);

  typedef SmallVector<Expr*, 20> ExprListTy;
  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(
      SmallVectorImpl<Expr *> &Exprs,
      SmallVectorImpl<SourceLocation> &CommaLocs,
      llvm::function_ref<void()> Completer = llvm::function_ref<void()>());

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);

bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false);

//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...]
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while condition expression. Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
/// An initializer is either a brace-enclosed initializer list or a plain
/// assignment-expression; dispatch on whether the next token is '{'.
ExprResult ParseInitializer() {
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions

ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                          bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
  /// \brief Allow any declarations, statements, OpenMP directives.
  ACK_Any,
  /// \brief Allow only statements and non-standalone OpenMP directives.
  ACK_StatementsOpenMPNonStandalone,
  /// \brief Allow statements and all executable OpenMP directives
  ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
                            SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts, AllowedConstructsKind Allowed,
    SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc,
                               Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                               AllowedConstructsKind Allowed,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// \brief Parse the block; this code is always used.
  IEB_Parse,
  /// \brief Skip the block entirely; this code is never used.
  IEB_Skip,
  /// \brief Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// \brief The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// \brief Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// \brief Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// \brief The name we're looking for.
  UnqualifiedId Name;

  /// \brief The behavior this __if_exists or __if_not_exists block
  /// should have.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,            // normal context
  DSC_class,             // class context, enables 'friend'
  DSC_type_specifier,    // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing,          // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,         // top-level/namespace declaration context
  DSC_template_param,    // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result,// ObjC method result context, enables 'instancetype'
  DSC_condition          // condition declaration context
};

/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts that consist solely of a type-specifier sequence.
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  // Everything else is a full declaration context.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where deduction is allowed.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  // Contexts where it is not.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // location of the ':' separating decl and range
  ExprResult RangeExpr;

  // A ':' was seen, so this is a for-range-declaration.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// \brief Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // C has no such ambiguity; decide purely from specifier/qualifier tokens.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// \brief Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  Error          ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *HasMissingTypename = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// \brief Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? 
bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &attrs, SourceLocation FixItLoc = SourceLocation()) { if (!attrs.Range.isValid()) return; DiagnoseProhibitedAttributes(attrs, FixItLoc); attrs.clear(); } void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear on certain syntactic locations // which standard permits but we don't supported yet, for example, attributes // appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// \brief Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// \brief Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. 
void DiagnoseAndSkipCXX11Attributes(); /// \brief Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if 
(standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// \brief Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation 
SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// \brief Parses opencl_unroll_hint attribute. /// \return false if error happens. bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, 
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                               AttributeList::Syntax Syntax);

  /// Parse a GNU 'typeof' specifier into \p DS.
  void ParseTypeofSpecifier(DeclSpec &DS);
  /// Parse a C++11 'decltype' specifier into \p DS.
  /// Returns a SourceLocation (presumably the end of the specifier — TODO
  /// confirm against the definition in ParseDeclCXX).
  SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
  void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
  void ParseAtomicSpecifier(DeclSpec &DS);

  ExprResult ParseAlignArgument(SourceLocation Start,
                                SourceLocation &EllipsisLoc);
  void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                               SourceLocation *endLoc = nullptr);

  /// Classify \p Tok as a C++11 virt-specifier ('override'/'final'), if any.
  VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
  /// Convenience overload: classify the current token.
  VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
    return isCXX11VirtSpecifier(Tok);
  }
  void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                          SourceLocation FriendLoc);

  bool isCXX11FinalKeyword() const;

  /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
  /// enter a new C++ declarator scope and exit it when the function is
  /// finished.
  class DeclaratorScopeObj {
    Parser &P;
    CXXScopeSpec &SS;
    bool EnteredScope;   // true once Sema has entered the declarator scope
    bool CreatedScope;   // true once a parser Scope has been pushed
  public:
    DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

    /// Push a parser scope and ask Sema to enter the declarator scope
    /// named by SS. May be called at most once; SS must already be set.
    void EnterDeclaratorScope() {
      assert(!EnteredScope && "Already entered the scope!");
      assert(SS.isSet() && "C++ scope was not set!");

      CreatedScope = true;
      P.EnterScope(0); // Not a decl scope.

      // Sema returns true on failure; only record success so the destructor
      // does not try to exit a scope that was never entered.
      if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
        EnteredScope = true;
    }

    /// Exit the Sema declarator scope (if entered) and pop the parser scope
    /// (if pushed), in that order.
    ~DeclaratorScopeObj() {
      if (EnteredScope) {
        assert(SS.isSet() && "C++ scope was cleared ?");
        P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
      }
      if (CreatedScope)
        P.ExitScope();
    }
  };

  /// ParseDeclarator - Parse and verify a newly-initialized declarator.
  void ParseDeclarator(Declarator &D);
  /// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. 
CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc, std::vector<IdentifierInfo *> &Ident, std::vector<SourceLocation> &NamespaceLoc, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, 
SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, AttributeList *Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool 
ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// \brief Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// \brief Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// \brief Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// \brief Parses declarative or executable directive. /// /// \param Allowed ACK_Any, if any directives are allowed, /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone /// executable directives are allowed. 
/// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. 
/// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> 
&TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if 
(Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
GB_unop__minv_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__minv_uint8_uint8)
// op(A') function: GB (_unop_tran__minv_uint8_uint8)

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: integer multiplicative "inverse" z = 1/x, evaluated with
// GraphBLAS' unsigned integer-division conventions (GB_IMINV_UNSIGNED is
// defined in the GB.h machinery — see GB_math for the exact 1/0 convention).
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting (identity here: A and C are both uint8_t)
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ; \
    Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply MINV elementwise to the anz entries of Ax, writing into Cx.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.  Parallelized over entries with OpenMP.

GrB_Info GB (_unop_apply__minv_uint8_uint8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap (NULL otherwise)
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Slots with Ab [p] == 0 are holes and must be left untouched.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined above.

GrB_Info GB (_unop_tran__minv_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
syr2k.dstblock3.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* syr2k.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "syr2k.h" /* Array initialization. */ static void init_array(int n, int m, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m)) { int i, j; *alpha = 1.5; *beta = 1.2; for (i = 0; i < n; i++) for (j = 0; j < m; j++) { A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n; B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m; } for (i = 0; i < n; i++) for (j = 0; j < n; j++) { C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("C"); for (i = 0; i < n; i++) for (j = 0; j < n; j++) { if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]); } POLYBENCH_DUMP_END("C"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_syr2k(int n, int m, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m), DATA_TYPE POLYBENCH_2D(At,M,N,m,n), DATA_TYPE POLYBENCH_2D(Bt,M,N,m,n)) { /* Sizes * the linesize is 64 bytes on keller and blum * on keller, L1,L2,L3 is 32 KB, 256 KB, 20480 KB * on blum, 32KB , 256 KB, 6 MB * 64 bits per word; each word is 8 bytes */ // Indices int i, j, k; int ii, jj; // PARAMETER 1 // make sure you change the data size when you change this too!!! int cacheSize = 256; // IN kilobytes !!! // PARAMETER 2 int jumpA = floor((cacheSize * 1024) / (4*8*8)); int jumpB = floor((cacheSize * 1024 ) / (18*8) ); int jump = jumpA; // Misc. Calculations int linesize = 8; // how many bytes per cache line? int blockcount = cacheSize * 1024 / linesize; // kb * (bytes / per kb) / (bytes / per cache line) //BLAS PARAMS //UPLO = 'L' //TRANS = 'N' //A is NxM //At is MxN //B is NxM //Bt is MxN //C is NxN #pragma scop // Note: I can't figure out how to // stack allocate the array; I do this beforehand // and it's untimed. for (ii=0; ii < _PB_N; ii += jump){ for(jj=0; jj < _PB_M; jj += jump){ for (i=ii; i < fmin(jump + ii, _PB_N); i++){ for (j=jj; j < fmin(jump + jj, _PB_M); j++){ // Transpose At[j][i] = A[i][j]; Bt[j][i] = B[i][j]; } } } } // At is M by N // Bt is M by N #pragma omp parallel for for (i = 0; i < _PB_N; i++) { for (j = 0; j <= i; j++) C[i][j] *= beta; for (k = 0; k < _PB_M; k++) for (j = 0; j <= i; j++) { C[i][j] += At[k][j]*alpha*B[i][k] + Bt[k][j]*alpha*A[i][k]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. 
*/ int n = N; int m = M; double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code #ifdef POLYBENCH_GFLOPS polybench_set_program_flops(FP_ops); // HAVERFORD addition #endif #if defined POLYFORD_VERBOSE printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n", __FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0); #endif /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(At,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(Bt,DATA_TYPE,M,N,m,n); /* Initialize array(s). */ init_array (n, m, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syr2k (n, m, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(At), POLYBENCH_ARRAY(Bt)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
GB_unaryop__identity_uint8_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint8_bool
// op(A') function: GB_tran__identity_uint8_bool

// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (IDENTITY: output equals the casted input)
#define GB_OP(z, x) \
    z = x ;

// casting (bool -> uint8_t, so false/true become 0/1)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries: Cx [p] = (uint8_t) Ax [p], parallelized
// with a static OpenMP schedule.  Returns GrB_NO_VALUE when this kernel has
// been compiled out (GB_DISABLE), so the caller falls back to the generic path.
GrB_Info GB_unop__identity_uint8_bool
(
    uint8_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c, specialized here via the macros defined above.
GrB_Info GB_tran__identity_uint8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kahan_sum.c
#include <stdlib.h> #include <omp.h> double do_kahan_sum(double* restrict var, long ncells) { struct esum_type{ double sum; double correction; }; // Get the total number of threads and thread_id int nthreads = 1; int thread_id = 0; #ifdef _OPENMP nthreads = omp_get_num_threads(); thread_id = omp_get_thread_num(); #endif struct esum_type local; local.sum = 0.0; local.correction = 0.0; // Compute the range for which this thread is responsible. int tbegin = ncells * ( thread_id ) / nthreads; int tend = ncells * ( thread_id + 1 ) / nthreads; for (long i = tbegin; i < tend; i++) { double corrected_next_term = var[i] + local.correction; double new_sum = local.sum + local.correction; local.correction = corrected_next_term - (new_sum - local.sum); local.sum = new_sum; } static struct esum_type *thread; static double sum; #ifdef _OPENMP #pragma omp master thread = malloc(nthreads*sizeof(struct esum_type)); #pragma omp barrier thread[thread_id].sum = local.sum; thread[thread_id].correction = local.correction; // Now do sum across threads // Wait until all threads get here. #pragma omp barrier // Use a single thread to compute the sum of all threads static struct esum_type global; #pragma omp master { global.sum = 0.0; global.correction = 0.0; for ( int i = 0 ; i < nthreads ; i ++ ) { double corrected_next_term = thread[i].sum + thread[i].correction + global.correction; double new_sum = global.sum + global.correction; global.correction = corrected_next_term - (new_sum - global.sum); global.sum = new_sum; } sum = global.sum + global.correction; free(thread); } // end omp master #pragma omp barrier #else sum = local.sum + local.correction; #endif return(sum); }
GB_binop__pair_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__pair_fp32)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))

// C type: float
// A type: float
// A pattern? 1
// B type: float
// B pattern? 1

// BinaryOp: cij = 1
// (PAIR: the op ignores both input values; every output entry is 1, which is
// why most eWiseMult/scale/bind variants are disabled below with #if 0.)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: PAIR never reads aij)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
// (empty: PAIR never reads bij)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FP32 || GxB_NO_PAIR_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pair_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the template block above already returned (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the stray empty statements below are emitted by the generator for
        // the (unused) GETB/GETA expansions of the PAIR op
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
gemm.c
/** * GEMM, General Matrix Multiplication * 通用矩阵乘法 */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include "gemm.h" void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float *m = calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } /** * @brief gemm: generalized matrix multiplication的缩写, 泛型矩阵乘法 * gemm(...) = ALPHA * MatMul(A, B) + BETA *C, A和B可以通过TA和TB参数控制转置 * * @param TA, TB 分别指示A和B在进行矩阵乘法前是否先进行转置 * @param M, N, K (M, K)矩阵乘以(K, N)矩阵 * @param A, B A是(M, K)或(K, M)矩阵, B是(N, K)或(K, N)矩阵 * @param ALPHA, BETA gemm(...) 
= ALPHA * MatMul(A, B) + BETA *C * @param lda, ldb, ldc 分别表示A, B, C的元素循环间隔, * @param C 初始保存偏置量矩阵, 计算过程中会被计算结果覆盖, 最终用来保存计算结果 */ void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } // MatMul(A, B), A is (M, K), B is (K, N) void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; //#pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } // MatMul(A, B^T), A is (M, K), B is (N, K) void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; //#pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } // MatMul(A^T, B), A is (K, M), B is (K, N) void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { //fprintf(stdout, "start gemm_tn++++++++++\n"); int i,j,k; //int ant = 0; //int bnt = 0; //int cnt = 0; //int dnt = 0; //#pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; //if (abs(A_PART) > 0.000000) { // ++ant; //} for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; //if (i*ldc+j == 128 * 784) { // fprintf(stdout, "A[%d] = %f, B[%d] = %f\n", k*lda+i, A_PART, k*ldb+j, B[k*ldb+j]); //} //if (i*ldc+j > 128 * 784) { // ++cnt; // if (abs(C[i*ldc+j]) > 0.000000) { // ++dnt; // } // if (abs(B[k*ldb+j]) > 0.000000) { // ++bnt; // } //} } } } //if (M == 625 && N == 784) { // fprintf(stdout, "ant = %d, bnt = %d, cnt = %d, dnt = %d\n", ant, bnt, cnt, dnt); // fprintf(stdout, "ALPHA = %f\n", ALPHA); // fprintf(stdout, "M = %d, N = %d, K = %d\n", M, N, K); // 
fprintf(stdout, "lda = %d, ldb = %d, ldc = %d\n", lda, ldb, ldc); // fprintf(stdout, "C[127][0] = %f, C[128][0] = %f\n", C[127 * 784 - 1], C[128 * 784]); //} //fprintf(stdout, "finish gemm_tn----------\n"); } // MatMul(A^T, B^T), A is (K, M), B is (K, N) void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; //#pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } // TA和TB分别是A和B的转置标识符 // C始终是(M, N)的矩阵 // 返回值: alpha * MatMul(AA, BB) + beta * C // 上式中的AA和BB的取值: // | A, if TA is 0 | B, if TB is 0 // AA =| BB =| // | A^T, otherwise | B^T, otherwise // 具体计算由不同函数实现, 实现的时候注意保持始终对A和B都进行"行遍历", 提高计算效率 // TA = 0, TB = 0, MatMul(A, B) = gemm_nn(A, B) // TA = 1, TB = 0, MatMul(A, B) = gemm_tn(A, B) // TA = 0, TB = 1, MatMul(A, B) = gemm_nt(A, B) // TA = 1, TB = 1, MatMul(A, B) = gemm_tt(A, B) void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(TA && !TB) gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(!TA && TB) gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); } #ifdef GPU #include <math.h> void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? 
CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); check_error(status); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaThreadSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); 
//printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); time_gpu(0,0,128,4096,4096); */ time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,576,12544); time_gpu(0,0,256,2304,784); time_gpu(1,1,2304,256,784); time_gpu(0,0,512,4608,196); time_gpu(1,1,4608,512,196); return 0; } #endif
GB_binop__bxnor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint32) // A*D function (colscale): GB (_AxD__bxnor_uint32) // D*A function (rowscale): GB (_DxB__bxnor_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint32) // C=scalar+B GB (_bind1st__bxnor_uint32) // C=scalar+B' GB (_bind1st_tran__bxnor_uint32) // C=A+scalar GB (_bind2nd__bxnor_uint32) // C=A'+scalar GB (_bind2nd_tran__bxnor_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 
0 // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_UINT32 || GxB_NO_BXNOR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// Dense ewise3 with accumulator: C += A+B. This operator has no such kernel
// ("(none)"), so the definition is compiled out; the #endif below closes a
// preprocessor conditional opened earlier in the file (not visible here).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all the work is done by the template, specialized via the GB_* macros
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // B sliced into B_ntasks tasks
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,         // scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,             // diagonal matrix of column scale factors
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,             // diagonal matrix of row scale factors
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,         // mask used structurally (values ignored)
    const bool Mask_comp,           // mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,       // if true, alpha/beta scalars are used
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast the untyped scalar inputs for the eWiseUnion case
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,              // if true, compute z = fmult(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (may be NULL: all entries present)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;       // bxnor: bitwise complement of xor
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (may be NULL: all entries present)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;       // bxnor: bitwise complement of xor
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ~((aij) ^ (y)) ;                  \
}

GrB_Info GB (_bind2nd_tran__bxnor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ========================= file: shape.h ========================= */
/*
 * shape.h
 *
 * Shape-info helpers for nd4j ndarrays: shape/stride buffers, permutation,
 * TAD (tensor-along-dimension) bookkeeping, and linear<->nd index conversion.
 *
 * Created on: Dec 28, 2015
 * Author: agibsonccc
 */

#ifndef SHAPE_H_
#define SHAPE_H_

#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>

// global limits used throughout the shape helpers
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
#define MAX_RANK 32
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432

#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif

// NOTE(review): both branches expand to the same token; kept for symmetry
#ifdef __CUDACC__
#define INLINEDEF inline
#else
#define INLINEDEF inline
#endif

#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>

namespace shape {

/**
 * Shape information approximating
 * the information on an ndarray
 */
    struct ND4J_EXPORT ShapeInformation {
        _CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr,
                                  char order_ = 0, int rank_ = 0, int offset_ = 0,
                                  int elementWiseStride_ = 0)
                : shape(shape_), stride(stride_), order(order_), rank(rank_),
                  offset(offset_), elementWiseStride(elementWiseStride_) {}

        Nd4jLong *shape;        // extent of each dimension
        Nd4jLong *stride;       // stride of each dimension
        char order;             // ordering character (presumably 'c' or 'f' — confirm with callers)
        int rank;
        int offset;
        int elementWiseStride;
    };

/**
 * Indexing information
 * for bounds checking
 */
    struct ND4J_EXPORT CurrentIndexing {
        int numElementsPerThread;
        int blockStartingIndex;
        int startingThreadIndex;
        int endingThreadIndex;
    };

    // ---- shape / stride equality helpers ----

    ND4J_EXPORT _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);

    ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);

    ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);

    ND4J_EXPORT _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);

    ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);

    ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);

    ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);

    ND4J_EXPORT _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB);

    ND4J_EXPORT _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB);

    ND4J_EXPORT _CUDA_HD int sizeAt(Nd4jLong *shape, int dim);

    template <typename T>
    ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);

    ND4J_EXPORT _CUDA_HD void traceNew(int id);

    ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);

    ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);

    ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);

    ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder, Nd4jLong* target);

/**
 * Get the shape info buffer
 * for the given rank and shape.
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer);

/**
 * Get the shape info buffer
 * for the given rank and shape.
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output);

//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);

    ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);

#ifdef __CUDACC__
    template <typename T>
    __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);

    __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);

    ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);

    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);

    ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);

    // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
    template <typename T>
    ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);

    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);

    ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);

/**
 * @param toCopy the shape to copy
 * @return a copy of the original struct
 */
    ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);

    ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer);

/**
 * Compute the element wise stride
 * for a given shape/stride configuration
 * @param rank the rank of the shape/stride
 * @param shape the shape
 * @param stride the stride
 * @param isFOrder 0 or 1 for whether the array is f
 * ordered or not
 * @return -1 if there is no element wise stride the
 * element wise stride of reshape(1,length) otherwise
 */
    ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);

/**
 * Compute the element wise stride
 * for a given shape/stride configuration
 * @param rank the rank of the shape/stride
 * @param shape the shape
 * @param stride the stride
 * @param isFOrder 0 or 1 for whether the array is f
 * ordered or not
 * @return -1 if there is no element wise stride the
 * element wise stride of reshape(1,length) otherwise
 */
    ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);

/**
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);

/**
 * In place permute swap
 * @param length
 * @param shape
 * @param rearrange
 */
    ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);

    ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);

    ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);

    ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);

    ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);

    ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);

    ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);

/**
 * Rearrange the permute indexes
 * according to which dimensions are specified.
 *
 * For example, dimension is implicitly:
 * 0,1,2
 *
 * If you want to do a reduce along dimensions 0 and 1,
 * you need to permute the indexes to be:
 * 2,0,1
 *
 * which will give us the ability to iterate along an element
 * wise stride.
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);

    ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);

/**
 * This method does inplace transpose of given shapeBuffer
 *
 * @param shapeBuffer
 */
    ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);

/**
 * Get the ordering for the device
 * @param length
 * @param shape
 * @param stride
 * @param elementStride
 * @return
 */
    ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);

/**
 * Ensure that every value in the re arrange
 * array is unique
 * @param arr
 * @param shape
 * @param arrLength
 * @param shapeLength
 * @return
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);

/**
 * Permute the shape information
 * @param info the shape information to permute
 * @param rearrange the order to re arrange
 * @param rank the rank of the rearrange array
 */
    ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
    ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);

/**
 * When 1 dimension is the whole length of the
 * array
 */
    ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);

    ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);

    ND4J_EXPORT _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
    ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);

    INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);

/**
 * Returns the shape portion of an information
 * buffer
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);

    template <typename T>
    ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
    ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
    //ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);

    ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);

    ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 3
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
    ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);

    ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);

    ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);

    ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo);

/**
 * Returns the rank portion of
 * an information buffer
 */
    ND4J_EXPORT _CUDA_HD int rank( Nd4jLong *buffer);

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
    ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);

/**
 * Returns the stride portion of an information
 * buffer
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);

/**
 * Compute the length of the given shape
 */
    ND4J_EXPORT _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);

    ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);

/***
 * Returns the offset
 * portion of an information buffer
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);

    ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);

/**
 * Returns the ordering
 * for this shape information buffer
 */
    ND4J_EXPORT _CUDA_HD char order(Nd4jLong *buffer);

/**
 * Returns the element wise stride for this information
 * buffer
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer);

/**
 * Returns the element wise stride for this information
 * buffer
 * relative to a dimension and ordering for a reduction index
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */
    ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
    ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the data array
 * @return the new array with the omitted
 *
 * item
 */
    template <typename T1, typename T2>
    ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the data array
 * @return the new array with the omitted
 *
 * item
 */
    template <typename T1, typename T2>
    ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);

/**
 * Iterate over a given set of indexes
 * the begin and end indexes are 0 based.
 * 1 padding is automatically assumed for the ending.
 *
 * For example if you want to iterate over 0 to 4
 * it will go to 4 rather than 3.
 *
 * indexes should be the indexes to exclude
 * indexes length should be the length of indexes
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);

/**
 * Returns a shape
 * forces the given length to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);

    ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();

    ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);

/**
 * Generate an int buffer
 * up to the given length
 * at the specified increment
 *
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);

/**
 * Range between from and two with an
 * increment of 1
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* range(int from, int to);

/**
 * Keep the given indexes
 * in the data
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);

/**
 * Generate reverse copy of the data
 * @param data
 * @param length
 * @return
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);

    template <typename T>
    ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);

    template <typename T>
    ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);

    template <typename T1, typename T2>
    ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);

/**
 *
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);

/**
 *
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */
    template <typename T>
    ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape of to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return the length per slice of the given shape
 * along the given dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength);

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);

/**
 * Computes the tensor along dimension
 * offset
 * @param index the index to get the offset for the tad for
 * @param rank the rank of the shapes and strides
 * @param info the shape information to use for tad
 * @param dimension the dimensions to use for computing the tensor along dimensions
 */
//    ND4J_EXPORT _CUDA_HD int offset(int index,
//                     int rank,
//                     shape::ShapeInformation *info,
//                     Nd4jLong *dimension,
//                     int dimensionLength);

/**
 * Computes the number
 * of tensors along
 * a given dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength);

/**
 * Computes the number
 * of tensors along
 * a given dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);

/**
 * Returns the tensor along dimension
 * for the given block index
 * @param blockSize
 * @param blockIdx
 * @param i
 * @return
 */
    ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);

/**
 * Computes the number of tads per block
 *
 */
    ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
//                                int dimensionLength);

/**
 * Returns a shape buffer
 * for the shape information metadata.
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);

    ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);

/**
 * Returns the number of elements per thread
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int numElementsPerThread(int N);

/**
 * Returns the block starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int blockStartingIndex(int N);

/**
 * Returns the thread starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadStartingIndex(int N, int stride, int offset);

/**
 * Returns the thread ending index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadEndingIndex(int N, int stride, int offset);

/**
 * Returns indexing information
 * for the current kernel invocation
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    CurrentIndexing *currentIndex(int N, int offset, int stride);

/** Given an linear index, element wise stride
 * and the length of each tad
 * map a linear index to a tad
 * @param i the index to map
 * @param the element wise stride for the tads
 * @param numElementsPerTad the number of elements
 * per tad
 */
    ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);

/**
 * Map a tad to a
 * reduction index.
 * @param tadIndexForOriginal the original tad index for the
 * split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
 * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
 */
    ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal);

/**
 * Computes the number of tads
 * per reduce index for the
 * reduction tad.
 */
    ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);

/**
 * Maps a linear index to a reduction index
 * @param i the linear index to map
 * @param elementWiseStride the element wise stride
 * for the multiple problem
 * @param tadNum the number of tads for the shrunken problem
 * @param originalTadNum the tad number for the reduced version of the problem
 */
    ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum);

/**
 * Returns the prod of the data
 * up to the given length
 */
    ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);

    ND4J_EXPORT _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length);

/**
 * Returns the rear most left over item not present in
 * the dimension array. This assumes that the dimension array is sorted.
 *
 * For example, given a dimension array of:
 * 0,2
 *
 * and
 *
 * 12,4,2,1 in data
 *
 * You end up with 1 (data[3])
 * since the first item won't match
 * the last item of the dimension array
 */
//    ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);

/**
 * Get an offset for retrieval
 * from a data buffer
 * based on the given
 * shape stride and given indices
 * @param baseOffset the offset to start from
 * @param shape the shape of the array
 * @param stride the stride of the array
 * @param indices the indices to iterate over
 * @return the double at the specified index
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices,int rank);

    ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);

    ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank,  Nd4jLong *shape,int index,int numIndices);

    ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank,  Nd4jLong *shape,int index);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape,int index,int numIndices,Nd4jLong *out);

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, int index, Nd4jLong *out);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);

/**
 * Convert the given index (such as 1,1)
 * to a linear index
 * @param shape the shape of the indexes to convert
 * @param indices the index to convert
 * @return the linear index given the shape
 * and indices
 */
    ND4J_EXPORT _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices);

/**
 * Compute the real linear indices for the given shape and stride
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank,  Nd4jLong *shape,  Nd4jLong *stride);

/**
 * Compute the real linear indices for the
 * given shape buffer. Shape,stride and rank are derived
 * from the buffer
 */
    ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void  ind2subOrder(Nd4jLong *shapeInfo,int index,int numIndices,Nd4jLong *out);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
    ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo,int index,Nd4jLong *out);

    // ---- debug printing helpers ----

    ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo);

    ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);

    ND4J_EXPORT _CUDA_HD void printIntArray(Nd4jLong *arr,int length);

    ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);

    ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);

    // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
    // also sort input array of dimensions, this operation is also necessary for creating TAD object
    ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);

    // return absolute index of array min, min is sub-array of max, index to be returned is min index and corresponds to maxIdx of max array
    ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx);

    ND4J_EXPORT _CUDA_HD void shapeScalar(Nd4jLong* const buffer);

    ND4J_EXPORT _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer);

    ND4J_EXPORT _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order);

//END HEADERS


    //BEGIN IMPLEMENTATIONS


#ifdef __CUDACC__
    template <typename T>
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
        // if we go for 3 dimensions coord space or below - just use shared memory for that
        if (size <= MAX_COORD * 4) {
            Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
            return ptr;
        } else {
            // otherwise go to preallocated global memory :(
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid * size > PREALLOC_SIZE - size) {
                // preallocated pool exhausted for this thread; fall back to device malloc
                return (Nd4jLong *) malloc(size);
            } else {
                // carve this thread's slot out of the preallocated buffer
                Nd4jLong *ret = buffer;
                ret += (tid * size);
                return ret;
            }
        }
    }
#endif

#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 */
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
        Nd4jLong *ret = buffer;
        ret += (threadIdx.x * size);
        return ret;
    }
#endif

/**
 * Length of a tad given
 * the shape information
 */
    INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
        if(dimensionLength == 1) {
            return shape::shapeOf(shapeInfo)[dimension[0]];
        }
        else {
            int ret = 1;
            // product of the extents of every requested dimension
            for(int i = 0; i < shape::rank(shapeInfo); i++) {
                for(int j = 0; j < dimensionLength; j++) {
                    if(i == dimension[j])
                        ret *= shape::shapeOf(shapeInfo)[dimension[j]];
                }
            }
            return ret;
        }
    }

/**
 * Tad element wise stride:
 * given the inner most dimension (the sorted dimension of the last)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it will be 12
 *
 * For 2,3 it's 1
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve ordering of the tad
 * but maybe used for reductions.
*/ INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) { return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength); } INLINEDEF _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) { return shape::shapeEquals(shape::rank(shapeInfo1),shape::shapeOf(shapeInfo1),shape::rank(shapeInfo2),shape::shapeOf(shapeInfo2)); } INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) { return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2)); } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) { if(rank1 != rank2) return false; for(int i = 0; i < rank1; i++) { if(stride1[i] != stride2[i]) return false; } return true; } INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) { Nd4jLong *retShape; int retShapeLength; if(dimensionLength == 1 && dimension[0] == 2147483647) { retShape = new Nd4jLong[2]; retShape[0] = 1; retShape[1] = 1; retShapeLength = 2; } else { retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength); retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength; } //ensure vector is proper shape if (retShapeLength == 1) { if (dimension[0] == 0) { auto 
newRetShape = new Nd4jLong[2]{1, retShape[0]}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } else { auto newRetShape = new Nd4jLong[2]{retShape[0], 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } } else if (retShapeLength == 0) { auto newRetShape = new Nd4jLong[2]{1, 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } auto ret = shape::shapeBuffer(retShapeLength,retShape); delete[] retShape; return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) { Nd4jLong *theShape = shape::shapeOf(shapeInfo); Nd4jLong *theStride = shape::stride(shapeInfo); int rank = dimensionLength == 1 ? 2 : dimensionLength; Nd4jLong *ret = buffer; //set the rank ret[0] = rank; Nd4jLong *retShape = shape::shapeOf(ret); Nd4jLong *retStride = shape::stride(ret); int len = rank; if(dimensionLength == 1) { if(shape::isMatrix(theShape,shape::rank(shapeInfo))) { if(dimension[0] == 0) { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } else { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong newStride[2] = {1,theStride[dimension[0]]}; Nd4jLong newShape[2] = {1,theShape[dimension[0]]}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong *newIndexes = dimension; if(reverseCopyStride) shape::reverseCopyTo(theStride, retStride, newIndexes, len); else shape::copyTo(len, theStride, retStride, newIndexes); shape::copyTo(len, theShape, retShape, newIndexes); } ret[shape::shapeInfoLength(rank) - 1] = 
shape::order(shapeInfo); return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) { int rank = dimensionLength == 1 ? 2 : dimensionLength; traceNew(4); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) { traceNew(5); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return createShapeInfo(shape, stride, rank, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) { buffer[0] = rank; Nd4jLong *retShape = shape::shapeOf(buffer); Nd4jLong *retStride = shape::stride(buffer); for(int i = 0;i < rank; i++) { retShape[i] = shape[i]; retStride[i] = stride[i]; } return buffer; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); Nd4jLong *ret = new Nd4jLong[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); Nd4jLong *stride = new Nd4jLong[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) { if (isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; int st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) { traceNew(7); Nd4jLong *stride = new Nd4jLong[rank]; if (rank == 1) { stride[0] = 1; return stride; } if (shape::isVector(shape, rank)) { for (int i = 0; i < 2; i++) stride[i] = 1; return stride; } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) { if (rank == 1) { ret[0] = 1; return ret; } if (shape::isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) { return calcStridesFortran(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStridesFortran(shape, rank, 1, ret); } /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) { return calcStrides(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStrides(shape, rank, 1, ret); } INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shape, const char order) { int rank = shape[0]; int doubleRank = 2*rank; if (rank > 0) if(order == 'c') { shape[doubleRank] = 1; // set unity as last stride for c order for(int j=1; j<rank; ++j) shape[doubleRank-j] = shape[doubleRank-j+1]*shape[rank+1-j]; } else { shape[rank+1] = 1; // set unity as first stride for f order for(int j=rank+1; j<doubleRank; ++j) shape[j+1] = shape[j]*shape[j-rank]; } // set last 3 elements in shape shape[doubleRank + 1] = 0; shape[doubleRank + 2] = 1; shape[doubleRank + 3] = (int)order; } // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) { for(int i=0; i<dimSize-1; ++i) if(dimensions[i] > dimensions[i+1]) return true; return false; } /** * @param toCopy the shape to copy * @return a copy of the original struct */ INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) { auto copy = new ShapeInformation; traceNew(8); copy->shape = new Nd4jLong[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong)); traceNew(9); copy->stride = new Nd4jLong[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) { if (rank == 0) return 
1; if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; Nd4jLong *olddims = shape::copyOf(rank, shape); Nd4jLong *oldstrides = shape::copyOf(rank, stride); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; traceNew(10); auto newStrides = new Nd4jLong[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; auto newShape = new Nd4jLong[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < rank; oi++) { if (shape[oi] != 1) { olddims[oldnd] = shape[oi]; oldstrides[oldnd] = stride[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newShapeRank; ni++) { np *= newShape[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newShapeRank && oi < oldnd) { np = newShape[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } } /* 
Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } ni = nj++; oi = oj++; } /* * Set strides corresponding to trailing 1s of the new shape. */ if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (ni >= 1) last_stride *= newShape[ni - 1]; } for (nk = ni; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return ret; } } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return -1; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape) { Nd4jLong *stride = shape::calcStrides(shape, rank); traceNew(11); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. 
* * This method is used only for SoftMax */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer) { Nd4jLong stride[MAX_RANK]; shape::calcStrides(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'c'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, buffer); return buffer; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape) { auto stride = shape::calcStridesFortran(shape,rank); traceNew(12); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; return shapeInfoBuffer; } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output) { Nd4jLong stride[MAX_RANK]; shape::calcStridesFortran(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'f'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, output); return output; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) { Nd4jLong length = shape::prodLong(shape,rank); traceNew(13); Nd4jLong *ret = new Nd4jLong[length]; for(int i = 0; i < length; i++) { Nd4jLong *idx 
= shape::ind2sub(rank, shape, i); ret[i] = shape::getOffset(0, shape, stride, idx, rank); delete[] idx; } return ret; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) { return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer)); } /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ INLINEDEF _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices) { int index = 0; int shift = 1; for(int i = 0; i < rank; i++) { index += shift * indices[i]; shift *= shape[i]; } return index; } template <typename T> INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) { #pragma omp simd for (int e = 0; e < length; e++) buffer[e] = value; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, int index,int numIndices) { traceNew(14); auto ret = new Nd4jLong[rank]; ind2sub(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, int index) { return ind2sub(rank,shape, index,shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, int index, int numIndices, Nd4jLong *ret) { int denom = numIndices; for(int i = rank - 1; i >= 0; i--) { denom /= shape[i]; ret[i] = index / denom; index %= denom; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape,int index, Nd4jLong *out) { ind2sub(rank,shape, index,shape::prodLong(shape,rank),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong * ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) { auto ret = new Nd4jLong[rank]; ind2subC(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong *ind2subC(int rank, Nd4jLong *shape, Nd4jLong index) { return ind2subC(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) { auto denom = numIndices; for(int i = 0; i < rank; i++) { denom /= shape[i]; if(denom > 0) { ret[i] = index / denom; index %= denom; } else ret[i] = 0; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) { ind2subC(rank,shape, index,shape::prodLong(shape,rank),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, int index, int numIndices,Nd4jLong *out) { if(shape::order(shapeInfo) == 'f') { shape::ind2sub( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } else { shape::ind2subC( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically 
prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, int index, Nd4jLong *out) { ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) { traceNew(16); Nd4jLong *ret = new Nd4jLong[length]; for (int i = 0; i < length; i++) { ret[i] = shape[rearrange[i]]; } return ret; } /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) { if(length == 1) { return; } else { Nd4jLong *shapeDeref = *shape; if(shape::prodLong(shapeDeref,length) < 2) { return; } } bool inOrder = true; for(int i = 0; i < length - 1; i++) { inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1]; } //all in order, nothing to do if(inOrder) return; Nd4jLong *shapeDeref = *shape; //we know they are just reversed, dimension length of 2 if(length == 2) { auto shapeFirst = shapeDeref[0]; auto shapeSecond = shapeDeref[1]; shapeDeref[0] = shapeSecond; shapeDeref[1] = shapeFirst; return; } else if(length == 1) { //no permute return; } auto temp = new Nd4jLong[length]; memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length); for (int i = 0; i < length; i++) { shapeDeref[i] = temp[rearrange[i]]; } delete[] temp; } INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) { if(shapeBuffer != out) memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer))); doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out); } INLINEDEF _CUDA_HD 
Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) { auto len = shape::shapeInfoLength(shape::rank(shapeBuffer)); Nd4jLong *copy = shape::copyOf(len, shapeBuffer); doPermuteShapeBuffer(copy,rearrange); return copy; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[shapeInfoLength(rank) - 2] = -1; shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } 
if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[shapeInfoLength(rank) - 2] = -1; shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) { //no swapping needs to happen if(shape::isScalar(shapeBuffer)) { return; } Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); // doPermuteShapeInfo(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large } /* INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { auto shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer); shape::copyOf(rearrageRank,rearrange, tmpBuffer); 
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); } */ INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange); shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1); delete[] rearrangeCopy1; auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange); shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2); shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1; delete[] rearrangeCopy2; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); if(shapeBuffer != tmpBuffer) shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rank) - 2] = -1; shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); } INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) { int delta = originalRank - dimensionLength; traceNew(17); Nd4jLong *ret = new Nd4jLong[originalRank]; for(int i = 0; i < delta; i++) { ret[i] = i + dimensionLength; } for(int i = delta; i < originalRank; i++) { ret[i] = i - delta; } return ret; } /** * Get the ordering for the device * @param length * @param shape * @param stride * 
@param elementStride * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } INLINEDEF _CUDA_HD void traceNew(int id) { //printf("new happened: [%i]\n", id); #ifndef __CUDACC__ //fflush(stdout); #endif } /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape 
is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) { if (rank == 0) return 0; if (rank == 1) return 1; if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 1; } return 0; } INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) { int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1 && shapeInfo[0] > 2; } INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_HD int isVector(Nd4jLong *shapeInfo) { return isVector(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } INLINEDEF _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && shapeFirstOne; } INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && !shapeFirstOne; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param 
rank the rank of the shape */ INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 0; } return 1; } INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) { return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns the shape portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) { return buffer + 1; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) { traceNew(18); T *ret = new T[length]; return copyOf(length, toCopy, ret); } template <typename T> INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) { memcpy(ret, toCopy, sizeof(T)*length); return ret; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) { memcpy(to, from, sizeof(T)*length); } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) { for(int i = 0; i < length; i++) { to[i] = from[indexes[i]]; } } /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ /* INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) { Nd4jLong *strideCopy = copyOf(shapeRank, toPermute); checkArrangeArray(rearrange, shapeRank, shapeRank); Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange); delete[] strideCopy; return newStride; } */ /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) { return shape + 1; } INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) { return static_cast<int>(shape::shapeOf(shapeBuffer)[0]); } INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); int newRank = rank - 1; if(newRank < 2) newRank = 2; Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)]; newShapeBuffer[0] = newRank; Nd4jLong *currShape = shape::shapeOf(shapeBuffer); Nd4jLong *currStride = shape::stride(shapeBuffer); //initialize new shape and stride by taking the shape and stride + 1 //and adding to the shape information //a slice is always just taking the existing shape and cutting the first index off //of the shape and stride Nd4jLong *newShape = shape::shapeOf(newShapeBuffer); Nd4jLong *newStride = shape::stride(newShapeBuffer); if(shape::isVector(shapeBuffer)) { Nd4jLong *currShape = shape::shapeOf(shapeBuffer); //row vector: slice index 0 is a valid index, just copy the whole thing if(currShape[0] == 1) { 
if(sliceIdx == 0) { memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer))); return newShapeBuffer; } } //column vector: this will be a scalar else { delete[] newShapeBuffer; Nd4jLong *scalar = shape::createScalarShapeInfo(); int offset = shape::offset(shapeBuffer); scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx; return scalar; } } else if(shape::isMatrix(shapeBuffer)) { newShape[0] = 1; newShape[1] = currShape[1]; newStride[0] = 1; newStride[1] = currStride[1]; } else { for(int i = 0; i < newRank; i++) { newShape[i] = currShape[i + 1]; newStride[i] = currStride[i + 1]; } } auto indices = new Nd4jLong[rank]; memset((void *) indices,0,rank * sizeof(Nd4jLong)); indices[0] = sliceIdx; Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank); newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset; if(shape::isMatrix(shapeBuffer)) { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1]; } else { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer); } newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1); delete[] indices; return newShapeBuffer; } /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ INLINEDEF _CUDA_HD int shapeInfoLength(int rank) { //FIXME magic numbers return rank * 2 + 4; } INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) { return shapeInfoLength(shape[0]); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) { //FIXME magic numbers return (rank * 2 + 4) * sizeof(Nd4jLong); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo) { //FIXME magic numbers return shapeInfoByteLength((int) shapeInfo[0]); } /** * Returns the rank portion of * an information buffer */ INLINEDEF _CUDA_HD int rank( Nd4jLong *buffer) { return static_cast<int>(buffer[0]); } /** * Converts a 
raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) { traceNew(19); auto info = new ShapeInformation; auto length = shapeInfoLength(rank(buffer)); auto rank = buffer[0]; //start after rank info->shape = buffer + 1; info->stride = buffer + (1 + rank); info->rank = rank; info->offset = buffer[length - 3]; info->elementWiseStride = buffer[length - 2]; Nd4jLong *stride = buffer + 1 + rank; info->stride = stride; info->order = (char) buffer[length - 1]; return info; } /** * Returns the stride portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *stride( Nd4jLong *buffer) { return buffer + (1 + rank(buffer)); } INLINEDEF _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo) { return ((shape::extra(shapeInfo) & ARRAY_EMPTY) == ARRAY_EMPTY); } /** * Compute the length of the given shape */ INLINEDEF _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); if (rank == 0) { if (isEmpty(shapeInfo)) return 0L; else return 1L; } if (rank == 1) return shapeInfo[1]; return shape::prodLong(shape::shapeOf(shapeInfo), rank); } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } /*** * Returns the offset * portion of an information buffer */ INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } /** * Returns the ordering * for this shape information buffer */ INLINEDEF _CUDA_HD char order(Nd4jLong *buffer) { //FIXME magic numbers return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 
1]); } /** * Returns the element wise stride for this information * buffer */ INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer) { return buffer[shapeInfoLength(buffer[0]) - 2]; } /** * Returns the element wise stride for this information * buffer relative to a dimension and reduction index */ INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) { if(dimensionLength > 1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } } } /** * Returns whether * the given shape info buffer * represents a scalar shape */ INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) { const int rank = shape::rank(info); if(rank > 2) return 0; if(rank == 0) return 1; if(rank == 1) return shape::shapeOf(info)[0] == 1; if(rank == 2) return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1; return 0; } /** * Returns whether * the given shape information * represents a scalar * shape or not */ INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) { const int rank = info->rank; if(rank > 2) return 0; if(rank == 1) return info->shape[0] == 1; if(rank == 2) return info->shape[0] == 1 && info->shape[1] == 1; return 0; } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) { int count = 0; int absLength = dataLength - indexesLength; for (int i = 0; i < dataLength && count < absLength; i++) { int contains = 0; for (int j = 0; j < indexesLength; j++) { if (i == indexes[j]) { contains = 1; break; } } if (!contains) { ret[count] = data[i]; count++; } } } /** * Return a copy of this array with 
the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) { auto lengthOfArr = dataLength - indexesLength; if(lengthOfArr < 0) { printf("Remove index call created a <= 0 length array. This was likely not intended."); } auto ret = new T1[lengthOfArr]; memset(ret,0,sizeof(T1) * lengthOfArr); removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret); return ret; } INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) { int len = end - indexesLength; traceNew(20); auto ret = new Nd4jLong[len]; int retIdx = 0; //not here that we do 0 based indexing for end - this assumes things like: //0 to 4 are specified for(int i = begin; i < end ; i++) { bool found = false; for(int j = 0; j < indexesLength; j++) { if(indexes[j] == i) { found = true; break; } } if(!found) { ret[retIdx++] = i; } } return ret; } /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) { return offset + threadIdx.x * xInfo->elementWiseStride; } #endif /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) { traceNew(21); Nd4jLong *ret = new Nd4jLong[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD int sizeAt(Nd4jLong *shape, int dim) { if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews auto length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; T *ret; traceNew(22); if(diff / increment < 1) ret = new T[1]; else ret = new T[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end 
* @return the int array starting at from and ending at to */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to) { return range<T>(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) { traceNew(23); Nd4jLong *ret = new Nd4jLong[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ template <typename T> INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) { if (length < 1) return nullptr; traceNew(24); T *copy = new T[length]; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = data[i]; copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } return copy; } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) { traceNew(25); T *ret = new T[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(T)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ 
template <typename T> INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) { T* ret = new T[numTotalElements]; Nd4jLong count = 0; for (Nd4jLong i = 0; i < numArrays; i++) { for (Nd4jLong j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) { if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { return shape::prod(shape,rank); } } else if(rank == dimensionLength) return shape::prod(shape,rank); int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength); auto ret = prodLong(ret2, absSelta); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) { auto tensorLength = prodLong(tensorShape, tensorShapeLength); auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } Nd4jLong offset = index * tensorLength / lengthPerSlice2; return offset; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) { Nd4jLong offset = 
index * tensorLength / lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength) { Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { Nd4jLong *keepShape = shape::shapeOf(shapeInfo); Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices, int rank) { Nd4jLong offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { #ifdef __CUDA_ARCH__ printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]); #else printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]); #endif #ifdef __CUDA_ARCH__ if (threadIdx.x == 0 
&& blockIdx.x == 0) printShapeInfoLinear("getOffsetFailed", rank, shape, stride); #endif return -1; } if(shape[i] != 1) { offset += indices[i] * stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) { return (int) nd4j::math::nd4j_ceil<double>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. */ INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) { traceNew(29); auto ret = new Nd4jLong[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count] = info->order; return ret; } INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } INLINEDEF _CUDA_HD void printIntArray(Nd4jLong *arr,int length) { for(int i = 0; i < length; i++) { printf(" %lld ", (long long) arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); Nd4jLong *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ",(long long) shape[i]); } 
printf("\n"); Nd4jLong *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ", (long long) stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) { printf("%s : [", msg); for (int i = 0; i < rank; i++) { printf("%lld, ", (long long) shape[i]); } for (int i = 0; i < rank; i++) { printf("%lld", (long long) strides[i]); if (i < rank - 1) printf(", "); } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("%s : [", msg); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. 
* @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *shape = shape::shapeOf(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); // swap shape for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = shape[idx2]; shape[idx2] = shape[idx1]; shape[idx1] = tmp; } // swap strides for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = strides[idx2]; strides[idx2] = strides[idx1]; strides[idx1] = tmp; } if (shape::order(shapeBuffer) == 'c') shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102; else shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99; } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() { traceNew(30); auto shape = new Nd4jLong[1]; shape[0] = 1; auto stride = new Nd4jLong[1]; stride[0] = 1; auto shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 1; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; shapeInformation2->order = 99; Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; delete[] shape; delete[] stride; return ret; } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length) { Nd4jLong prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) { Nd4jLong *stride = shape::stride(data); //corner case: return the 
final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. 
*/ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = shape::shapeInfoLength(rank); for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) { return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder); } // INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) { // unsigned Nd4jLong *shape; // unsigned int ndims, wordSize; // bool fortranOrder; // cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder); // Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder); // delete[] shape; // return ret; // } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) { if(fortranOrder) { Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank,(Nd4jLong *) shape); return shapeBufferRet; } else { Nd4jLong *newShape = new Nd4jLong[rank]; for(int i = 0; i < rank; i++) { newShape[i] = shape[i]; } Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank,newShape); delete[] newShape; return shapeBufferRet; } } INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); char order = shape::order(shapeBuffer); if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1) return true; if (order == 'c') { for (int i = 1; i < rank; i++) if (strides[i-1] <= strides[i]) return false; return true; } else if (order == 'f') { for (int i = 1; i < rank; i++) if (strides[i-1] >= strides[i]) return false; return true; } else { printf("Unknown order for array!\n"); return false; } } 
INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) { int oldnd; Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; Nd4jLong* newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ 
if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = shape::elementWiseStride(oldShape); } if (isFOrder && ni >= 1) { last_stride *= newShapeOf[ni - 1]; } for (nk = ni; nk < newRank; nk++) { newStrides[nk] = last_stride; } target[0] = newRank; int cnt = 1; for (int e = 0; e < newRank; e++) target[cnt++] = newShapeOf[e]; for (int e = 0; e < newRank; e++) target[cnt++] = newStrides[e]; target[shape::shapeInfoLength(newRank) - 3] = 0; target[shape::shapeInfoLength(newRank) - 2] = -1; target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99; delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) { int oldnd; Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; auto newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also 
it sorts input array of dimensions, this operation is also necessary for creating TAD object INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) { int dimSize = dimensions.size(); if(dimSize == 0) throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!"); // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim| for(auto& dim : dimensions) if(dim < 0) dim += rank; // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods if (dimSize > 1) { std::sort(dimensions.begin(), dimensions.end()); // remove duplicates if they are present dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end()); } // check whether number of dimensions is to big (>rank) dimSize = dimensions.size(); if(dimSize > rank) throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!"); // check if min dimension is still negative and whether max dimension is bigger then rank-1 if(dimensions[0] < 0 || dimensions.back() > (rank-1)) throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !"); } // return absolute index of array min, min is sub-array of max, index to be returned is min's index and corresponds to maxIdx of max array INLINEDEF _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx) { const int rankMax = maxShapeInfo[0]; const int rankMin = minShapeInfo[0]; auto* idxPerRank = new Nd4jLong[rankMax]; ind2subC(rankMax, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<int&>(maxIdx), idxPerRank); Nd4jLong minIdx = 0; for(int i = 0; i < rankMin; ++i) { if(minShapeInfo[rankMin - i] == 1 || idxPerRank[rankMax - i - 1] == 0) continue; if(idxPerRank[rankMax - i - 1] 
>= minShapeInfo[rankMin - i]) idxPerRank[rankMax - i - 1] %= minShapeInfo[rankMin - i]; minIdx += idxPerRank[rankMax - i - 1] * stride(const_cast<Nd4jLong*>(minShapeInfo))[rankMin - i - 1]; } delete[] idxPerRank; return minIdx; } INLINEDEF _CUDA_HD void shapeScalar(Nd4jLong* const buffer) { buffer[0] = 0; buffer[1] = 0; buffer[2] = 1; buffer[3] = 99; } INLINEDEF _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order) { buffer[0] = 2; buffer[1] = 1; buffer[2] = 1; buffer[3] = 1; buffer[4] = 1; buffer[5] = 0; buffer[6] = 1; buffer[7] = (int)order; } INLINEDEF _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer) { buffer[0] = 1; buffer[1] = length; buffer[2] = 1; buffer[3] = 0; buffer[4] = 1; buffer[5] = 99; } template <typename T1, typename T2> INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) { for (Nd4jLong e = 0; e < length; e++) to[e] = (T2) from[e]; }; } #endif /* SHAPE_H_ */
// ===== file boundary: GB_binop__le_uint8.c =====
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__le_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint8) // A*D function (colscale): GB (_AxD__le_uint8) // D*A function (rowscale): GB (_DxB__le_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint8) // C=scalar+B GB (_bind1st__le_uint8) // C=scalar+B' GB (_bind1st_tran__le_uint8) // C=A+scalar GB (_bind2nd__le_uint8) // C=A'+scalar GB (_bind2nd_tran__le_uint8) // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT8 || GxB_NO_LE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__le_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__le_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
// ===== file boundary: GB_binop__ge_fp32.c =====
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_fp32 // A.*B function (eWiseMult): GB_AemultB__ge_fp32 // A*D function (colscale): GB_AxD__ge_fp32 // D*A function (rowscale): GB_DxB__ge_fp32 // C+=B function (dense accum): GB_Cdense_accumB__ge_fp32 // C+=b function (dense accum): GB_Cdense_accumb__ge_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_fp32 // C=scalar+B GB_bind1st__ge_fp32 // C=scalar+B' GB_bind1st_tran__ge_fp32 // C=A+scalar GB_bind2nd__ge_fp32 // C=A'+scalar GB_bind2nd_tran__ge_fp32 // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = 
Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_fp32 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ge_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pvm-secuencial.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Upper-triangular matrix-vector product v2 = m * v1, parallelized with
 * OpenMP.  The matrix dimension N is read from argv[1].
 *
 * Fixes over the previous revision:
 *  - argv[1] was dereferenced BEFORE the argc check, crashing when the
 *    program was run with no arguments; the check now comes first.
 *  - v2 was accumulated into without ever being initialized (malloc'd
 *    garbage was read); it is now zero-initialized via a local accumulator.
 *  - the "#pragma omp atomic" was unnecessary: each iteration i of the
 *    parallel loop is executed by exactly one thread and only that thread
 *    touches v2[i], so there is no race to protect against.
 *  - allocation failures are checked, memory is freed, printf format now
 *    matches the argument type (%d for int), and main returns int.
 */
int main(int argc, char **argv)
{
    int i, j;
    omp_sched_t kind;
    int modifier;

    /* Validate the command line before touching argv[1]. */
    if (argc < 2) {
        fprintf(stderr, "Faltan argumentos\n");
        exit(-1);
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        fprintf(stderr, "Tamaño inválido\n");
        exit(-1);
    }

    double **m = malloc(N * sizeof *m);
    double *v1 = malloc(N * sizeof *v1);
    double *v2 = malloc(N * sizeof *v2);
    if (m == NULL || v1 == NULL || v2 == NULL) {
        fprintf(stderr, "Sin memoria\n");
        exit(-1);
    }
    for (i = 0; i < N; i++) {
        m[i] = malloc(N * sizeof *m[i]);
        if (m[i] == NULL) {
            fprintf(stderr, "Sin memoria\n");
            exit(-1);
        }
    }

    double start, end, elapsed;

    /* Initialization: v1 = (1..N); m is upper triangular, m[i][j] = j+1
     * for j >= i and 0 below the diagonal. */
    for (i = 0; i < N; ++i) {
        v1[i] = i + 1;
        for (j = 0; j < i; ++j)
            m[i][j] = 0;
        for (j = i; j < N; ++j)
            m[i][j] = j + 1;
    }

    if (N < 20) {
        printf("Matriz:\n");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++)
                printf(" %g ", m[i][j]);
            printf("\n");
        }
        printf("Vector:\n");
        for (i = 0; i < N; i++)
            printf(" %g ", v1[i]);
        printf("\n");
    }

    /* One thread per processor; the loop schedule is taken from the
     * OMP_SCHEDULE environment variable (schedule(runtime) below). */
    omp_set_num_threads(omp_get_num_procs());
    omp_get_schedule(&kind, &modifier);

    start = omp_get_wtime();

    /* v2 = m * v1.  Only the upper triangle (j >= i) is traversed; row i
     * belongs to a single thread, so a plain local accumulator suffices. */
    #pragma omp parallel for schedule(runtime) private(j)
    for (i = 0; i < N; ++i) {
        double sum = 0.0;
        for (j = i; j < N; ++j)
            sum += m[i][j] * v1[j];
        v2[i] = sum;
    }

    end = omp_get_wtime();
    elapsed = end - start;

    if (N < 20) {
        for (i = 0; i < N; i++)
            printf("v[%d] = %g\n", i, v2[i]);
    } else {
        printf("v[%d] = %g\n", 0, v2[0]);
        printf("v[%d] = %g\n", N - 1, v2[N - 1]);
    }
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%d\n", elapsed, N);

    for (i = 0; i < N; i++)
        free(m[i]);
    free(m);
    free(v1);
    free(v2);
    return 0;
}
omp_for_nowait.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" /* * This test will hang if the nowait is not working properly. * * It relies on a thread skipping to the second for construct to * release the threads in the first for construct. * * Also, we use static scheduling to guarantee that one * thread will make it to the second for construct. */ volatile int release; volatile int count; void wait_for_release_then_increment(int rank) { fprintf(stderr, "Thread nr %d enters first for construct" " and waits.\n", rank); while (release == 0) THREAD_SCHED_POINT(); #pragma omp atomic count++; } void release_and_increment(int rank) { fprintf(stderr, "Thread nr %d sets release to 1\n", rank); release = 1; #pragma omp atomic count++; } int test_omp_for_nowait() { release = 0; count = 0; #pragma omp parallel num_threads(4) { int rank; int i; rank = omp_get_thread_num(); #pragma omp for schedule(static) nowait for (i = 0; i < 4; i++) { if (i < 3) wait_for_release_then_increment(rank); else { fprintf(stderr, "Thread nr %d enters first for and goes " "immediately to the next for construct to release.\n", rank); #pragma omp atomic count++; } } #pragma omp for schedule(static) for (i = 0; i < 4; i++) { release_and_increment(rank); } } return (count==8); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_nowait()) { num_failed++; } } return num_failed; }
GB_convert_sparse_to_hyper.c
//------------------------------------------------------------------------------
// GB_convert_sparse_to_hyper: convert a matrix from sparse to hypersparse
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// On input, the matrix may have shallow A->p content; it is safely removed.
// On output, the matrix is always hypersparse (even if out of memory).  If the
// input matrix is non-hypersparse, it is given new A->p and A->h that are not
// shallow.  If the input matrix is already hypersparse, nothing is changed
// (and in that case A->p and A->h remain shallow on output if shallow on
// input).  The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input).

// If an out-of-memory condition occurs, all content of the matrix is cleared.
// NOTE(review): on the second OOM path below only the freshly allocated
// Ap_new/Ah_new and the workspace are freed; confirm that the caller (or
// GB_WERK/GB_FREE machinery) performs the advertised clearing of A.

// If the input matrix A is hypersparse, bitmap or full, it is unchanged.

#include "GB.h"

GrB_Info GB_convert_sparse_to_hyper // convert from sparse to hypersparse
(
    GrB_Matrix A,           // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (A, "A converting to hypersparse", GB0) ;
    int64_t anz = GB_NNZ (A) ;
    // zombies, jumbled entries, and pending tuples are all tolerated: only
    // the vector pointers A->p are inspected, never A->i or A->x.
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // convert A from sparse to hypersparse
    //--------------------------------------------------------------------------

    if (GB_IS_SPARSE (A))
    {

        //----------------------------------------------------------------------
        // determine the number of threads to use
        //----------------------------------------------------------------------

        GBURBLE ("(sparse to hyper) ") ;
        int64_t n = A->vdim ;

        GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
        int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
        // oversubscribe tasks (8 per thread) for load balance, but never more
        // tasks than vectors, and at least one task
        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, n) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A in each slice
        //----------------------------------------------------------------------

        ASSERT (A->nvec == A->plen && A->plen == n) ;

        // keep a handle on the old A->p so it can be read after the new
        // arrays are transplanted, and freed at the end (unless shallow)
        const int64_t *restrict Ap_old = A->p ;
        size_t Ap_old_size = A->p_size ;
        bool Ap_old_shallow = A->p_shallow ;

        GB_WERK_DECLARE (Count, int64_t) ;
        GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
        if (Count == NULL)
        {
            // out of memory
            return (GrB_OUT_OF_MEMORY) ;
        }

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // each task counts the non-empty vectors in its own range of j
            int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                // vector j is non-empty iff its pointer range is non-empty
                if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
            }
            Count [tid] = my_nvec_nonempty ;
        }

        //----------------------------------------------------------------------
        // compute cumulative sum of Counts and nvec_nonempty
        //----------------------------------------------------------------------

        // after the cumsum, Count [tid] is the position in Ah_new where task
        // tid writes its first non-empty vector
        GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
        int64_t nvec_nonempty = Count [ntasks] ;
        A->nvec_nonempty = nvec_nonempty ;

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ;
        int64_t *restrict Ah_new = NULL ; size_t Ah_new_size = 0 ;
        Ap_new = GB_MALLOC (nvec_nonempty+1, int64_t, &Ap_new_size) ;
        Ah_new = GB_MALLOC (nvec_nonempty , int64_t, &Ah_new_size) ;
        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory
            GB_WERK_POP (Count, int64_t) ;
            GB_FREE (&Ap_new, Ap_new_size) ;
            GB_FREE (&Ah_new, Ah_new_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        // transplant before filling: the parallel loop below writes through
        // Ap_new/Ah_new while reading the saved Ap_old pointer
        A->plen = nvec_nonempty ;
        A->nvec = nvec_nonempty ;
        A->p = Ap_new ; A->p_size = Ap_new_size ;
        A->h = Ah_new ; A->h_size = Ah_new_size ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // k starts at this task's slot computed by the cumsum above, so
            // writes from different tasks never overlap
            int64_t jstart, jend, k = Count [tid] ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                if (Ap_old [j] < Ap_old [j+1])
                {
                    // vector index j is the kth vector in the new Ah
                    Ap_new [k] = Ap_old [j] ;
                    Ah_new [k] = j ;
                    k++ ;
                }
            }
            ASSERT (k == Count [tid+1]) ;
        }

        // terminate the vector pointer list with the total entry count
        Ap_new [nvec_nonempty] = anz ;
        A->magic = GB_MAGIC ;

        //----------------------------------------------------------------------
        // free workspace, and free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        GB_WERK_POP (Count, int64_t) ;
        if (!Ap_old_shallow)
        {
            GB_FREE (&Ap_old, Ap_old_size) ;
        }

        //----------------------------------------------------------------------
        // A is now hypersparse
        //----------------------------------------------------------------------

        ASSERT (GB_IS_HYPERSPARSE (A)) ;
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form (or left as full or bitmap)
    //--------------------------------------------------------------------------

    ASSERT (anz == GB_NNZ (A)) ;
    ASSERT_MATRIX_OK (A, "A conv to hypersparse (or left full/bitmap)", GB0) ;
    ASSERT (!GB_IS_SPARSE (A)) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;
    return (GrB_SUCCESS) ;
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
omp_task_final.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_task_final() { int tids[NUM_TASKS]; int includedtids[NUM_TASKS]; int i; int error = 0; #pragma omp parallel { #pragma omp single { for (i = 0; i < NUM_TASKS; i++) { /* First we have to store the value of the loop index in a new variable * which will be private for each task because otherwise it will be overwritten * if the execution of the task takes longer than the time which is needed to * enter the next step of the loop! */ int myi; myi = i; #pragma omp task final(i>=10) { tids[myi] = omp_get_thread_num(); /* we generate included tasks for final tasks */ if(myi >= 10) { int included = myi; #pragma omp task { my_sleep (SLEEPTIME); includedtids[included] = omp_get_thread_num(); } /* end of omp included task of the final task */ my_sleep (SLEEPTIME); } /* end of if it is a final task*/ } /* end of omp task */ } /* end of for */ } /* end of single */ } /*end of parallel */ /* Now we ckeck if more than one thread executed the final task and its included task. */ for (i = 10; i < NUM_TASKS; i++) { if (tids[i] != includedtids[i]) { error++; } } return (error==0); } /* end of check_paralel_for_private */ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_task_final()) { num_failed++; } } return num_failed; }
lbfgsbsolver.h
// CppNumericalSolver
//
// Bound-constrained L-BFGS (L-BFGS-B) following Byrd, Lu, Nocedal & Zhu,
// "A limited memory algorithm for bound constrained optimization":
// a limited-memory quasi-Newton approximation B = theta*I - W*M*W^T is
// maintained from the (s, y) history, a generalized Cauchy point is computed
// along the projected gradient path, the quadratic model is minimized over
// the free variables, and a More-Thuente line search finishes each step.
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../linesearch/morethuente.h"

#ifndef LBFGSBSOLVER_H_
#define LBFGSBSOLVER_H_

namespace cppoptlib {

template<typename Dtype>
class LbfgsbSolver : public ISolver<Dtype, 1> {
  // last updates (iterate history, most recent at the back)
  std::list<Vector<Dtype>> xHistory;
  // workspace matrices of the compact representation B = theta*I - W*M*W^T
  Matrix<Dtype> W, M;
  // ref to problem statement
  Problem<Dtype> *objFunc_;
  // per-coordinate lower/upper bounds (+/- infinity when the problem is
  // unbounded in that direction)
  Vector<Dtype> lboundTemplate;
  Vector<Dtype> uboundTemplate;
  // scaling of the identity part of B
  Dtype theta;
  int DIM;

  /**
   * @brief sort pairs (k,v) according v ascending
   * @details returns the first components of v reordered so that their
   *          second components are ascending
   *
   * @param v vector of (index, value) pairs
   * @return indices sorted by value
   */
  std::vector<int> sort_indexes(const std::vector< std::pair<int, Dtype> > &v) {
    std::vector<int> idx(v.size());
    for (size_t i = 0; i != idx.size(); ++i)
      idx[i] = v[i].first;
    sort(idx.begin(), idx.end(),
         [&v](size_t i1, size_t i2) {
      return v[i1].second < v[i2].second;
    });
    return idx;
  }

  /**
   * @brief Algorithm CP: Computation of the generalized Cauchy point
   * @details PAGE 8 of the L-BFGS-B paper.  Walks the breakpoints of the
   *          projected-gradient path x(t) = P(x - t*g, l, u) segment by
   *          segment until the one-dimensional quadratic model starts to
   *          increase, producing x_cauchy and the vector c = W^T(x_cauchy-x)
   *          reused by SubspaceMinimization.
   *
   * @param x current iterate (in); g gradient at x (in)
   * @param x_cauchy generalized Cauchy point (out)
   * @param c W^T * (x_cauchy - x) (out)
   */
  void GetGeneralizedCauchyPoint(Vector<Dtype> &x, Vector<Dtype> &g, Vector<Dtype> &x_cauchy, Vector<Dtype> &c) {
    // NOTE(review): this local DIM shadows the member DIM — intentional here
    // since both hold x.rows(), but worth confirming.
    const int DIM = x.rows();
    // Given x,l,u,g, and B = \theta I-WMW
    // {all t_i} = { (idx,value), ... }
    // TODO: use "std::set" ?
    std::vector<std::pair<int, Dtype> > SetOfT;
    // the feasible set is implicitly given by "SetOfT - {t_i==0}"
    Vector<Dtype> d = -g;
    // n operations: breakpoint t_i where coordinate i hits its bound
    for (int j = 0; j < DIM; j++) {
      if (g(j) == 0) {
        SetOfT.push_back(std::make_pair(j, std::numeric_limits<Dtype>::max()));
      } else {
        Dtype tmp = 0;
        if (g(j) < 0) {
          tmp = (x(j) - uboundTemplate(j)) / g(j);
        } else {
          tmp = (x(j) - lboundTemplate(j)) / g(j);
        }
        SetOfT.push_back(std::make_pair(j, tmp));
      }
    }
    // sortedindices [1,0,2] means the minimal element is on the 1-st entry
    std::vector<int> sortedIndices = sort_indexes(SetOfT);
    x_cauchy = x;
    // Initialize
    // p :=     W^Dtype*p
    Vector<Dtype> p = (W.transpose() * d);                     // (2mn operations)
    // c :=     0
    c = Eigen::Matrix<Dtype, Eigen::Dynamic, Eigen::Dynamic>::Zero(M.rows(), 1);
    // f' :=    g^Dtype*d = -d^Td
    Dtype f_prime = -d.dot(d);                         // (n operations)
    // f'' :=   \theta*d^Dtype*d-d^Dtype*W*M*W^Dtype*d = -\theta*f' - p^Dtype*M*p
    Dtype f_doubleprime = (Dtype)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
    // \delta t_min :=  -f'/f''
    Dtype dt_min = -f_prime / f_doubleprime;
    // t_old :=     0
    Dtype t_old = 0;
    // b :=     argmin {t_i , t_i >0}   (first strictly positive breakpoint)
    int i = 0;
    for (int j = 0; j < DIM; j++) {
      i = j;
      if (SetOfT[sortedIndices[j]].second > 0)
        break;
    }
    int b = sortedIndices[i];
    // see below
    // t                    :=  min{t_i : i in F}
    Dtype t = SetOfT[b].second;
    // \delta Dtype             :=  t - 0
    Dtype dt = t ;
    // examination of subsequent segments: advance while the model minimizer
    // lies beyond the current breakpoint
    while ((dt_min >= dt) && (i < DIM)) {
      if (d(b) > 0)
        x_cauchy(b) = uboundTemplate(b);
      else if (d(b) < 0)
        x_cauchy(b) = lboundTemplate(b);
      // z_b = x_p^{cp} - x_b
      Dtype zb = x_cauchy(b) - x(b);
      // c   :=  c +\delta t*p
      c += dt * p;
      // cache
      Vector<Dtype> wbt = W.row(b);
      // update the directional derivatives of the quadratic model after
      // fixing coordinate b at its bound (paper eq. for f' and f'')
      f_prime += dt * f_doubleprime + (Dtype) g(b) * g(b) + (Dtype) theta * g(b) * zb
                 - (Dtype) g(b) * wbt.transpose() * (M * c);
      f_doubleprime += (Dtype) - 1.0 * theta * g(b) * g(b)
                       - (Dtype) 2.0 * (g(b) * (wbt.dot(M * p)))
                       - (Dtype) g(b) * g(b) * wbt.transpose() * (M * wbt);
      p += g(b) * wbt.transpose();
      d(b) = 0;
      dt_min = -f_prime / f_doubleprime;
      t_old = t;
      ++i;
      if (i < DIM) {
        b = sortedIndices[i];
        t = SetOfT[b].second;
        dt = t - t_old;
      }
    }
    dt_min = std::max(dt_min, (Dtype)0.0);
    t_old += dt_min;
    // remaining (still free) coordinates move along d up to t_old
#pragma omp parallel for
    for (int ii = i; ii < x_cauchy.rows(); ii++) {
      x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
    }
    c += dt_min * p;
  }

  /**
   * @brief find alpha* = max {a : a <= 1 and  l_i-xc_i <= a*d_i <= u_i-xc_i}
   * @details largest step along du from x_cp that stays inside the box
   *
   * @param x_cp Cauchy point; du search direction on the free variables
   * @param FreeVariables indices of the free variables
   * @return maximal feasible step length in (0, 1]
   */
  Dtype findAlpha(Vector<Dtype> &x_cp, Vector<Dtype> &du, std::vector<int> &FreeVariables) {
    Dtype alphastar = 1;
    const unsigned int n = FreeVariables.size();
    for (unsigned int i = 0; i < n; i++) {
      if (du(i) > 0) {
        alphastar = std::min(alphastar, (uboundTemplate(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
      } else {
        alphastar = std::min(alphastar, (lboundTemplate(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
      }
    }
    return alphastar;
  }

  /**
   * @brief solving unbounded probelm
   * @details direct primal method (paper sec. 5.1): minimize the quadratic
   *          model over the variables that are free at the Cauchy point,
   *          then clip the step back into the box via findAlpha
   *
   * @param SubspaceMin resulting subspace minimizer (out)
   */
  void SubspaceMinimization(Vector<Dtype> &x_cauchy, Vector<Dtype> &x, Vector<Dtype> &c, Vector<Dtype> &g,
                            Vector<Dtype> &SubspaceMin) {
    Dtype theta_inverse = 1 / theta;
    // free variables = coordinates not at a bound at the Cauchy point
    std::vector<int> FreeVariablesIndex;
    for (int i = 0; i < x_cauchy.rows(); i++) {
      if ((x_cauchy(i) != uboundTemplate(i)) && (x_cauchy(i) != lboundTemplate(i))) {
        FreeVariablesIndex.push_back(i);
      }
    }
    const int FreeVarCount = FreeVariablesIndex.size();
    // WZ = W^T restricted to the free variables (columns = free rows of W)
    Matrix<Dtype> WZ = Matrix<Dtype>::Zero(W.cols(), FreeVarCount);
    for (int i = 0; i < FreeVarCount; i++)
      WZ.col(i) = W.row(FreeVariablesIndex[i]);
    // reduced gradient of the model at the Cauchy point
    Vector<Dtype> rr = (g + theta * (x_cauchy - x) - W * (M * c));
    // r=r(FreeVariables);
    Vector<Dtype> r = Matrix<Dtype>::Zero(FreeVarCount, 1);
    for (int i = 0; i < FreeVarCount; i++)
      r.row(i) = rr.row(FreeVariablesIndex[i]);
    // STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
    Vector<Dtype> v = M * (WZ * r);
    // STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
    Matrix<Dtype> N = theta_inverse * WZ * WZ.transpose();
    // N = I - MN
    N = Matrix<Dtype>::Identity(N.rows(), N.rows()) - M * N;
    // STEP: 5
    // v = N^{-1}*v
    v = N.lu().solve(v);
    // STEP: 6
    // HERE IS A MISTAKE IN THE ORIGINAL PAPER!
    Vector<Dtype> du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
    // STEP: 7  clip the unconstrained step back into the box
    Dtype alpha_star = findAlpha(x_cauchy, du, FreeVariablesIndex);
    // STEP: 8
    Vector<Dtype> dStar = alpha_star * du;
    SubspaceMin = x_cauchy;
    for (int i = 0; i < FreeVarCount; i++) {
      SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
    }
  }

 public:
  /**
   * @brief minimize objFunc subject to the problem's box constraints,
   *        starting from (and writing the solution back into) x0
   */
  void minimize(Problem<Dtype> &objFunc, Vector<Dtype> & x0) {
    objFunc_ = &objFunc;
    DIM = x0.rows();
    // missing bounds are replaced by +/- infinity
    if (objFunc.hasLowerBound()) {
      lboundTemplate = objFunc_->lowerBound();
    }else {
      lboundTemplate = -Vector<Dtype>::Ones(DIM)* std::numeric_limits<Dtype>::infinity();
    }
    if (objFunc.hasUpperBound()) {
      uboundTemplate = objFunc_->upperBound();
    }else {
      uboundTemplate = Vector<Dtype>::Ones(DIM)* std::numeric_limits<Dtype>::infinity();
    }
    theta = 1.0;
    // empty history: B starts as the identity (W, M have zero columns)
    W = Matrix<Dtype>::Zero(DIM, 0);
    M = Matrix<Dtype>::Zero(0, 0);
    xHistory.push_back(x0);
    Matrix<Dtype> yHistory = Matrix<Dtype>::Zero(DIM, 0);
    Matrix<Dtype> sHistory = Matrix<Dtype>::Zero(DIM, 0);
    Vector<Dtype> x = x0, g = x0;
    size_t k = 0;
    Dtype f = objFunc.value(x);
    objFunc.gradient(x, g);
    // conv. crit.: infinity norm of the projected gradient >= 1e-4
    auto noConvergence =
    [&](Vector<Dtype> & x, Vector<Dtype> & g)->bool {
      return (((x - g).cwiseMax(lboundTemplate).cwiseMin(uboundTemplate) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
    };
    while (noConvergence(x, g) && (k < this->settings_.maxIter)) {
      Dtype f_old = f;
      Vector<Dtype> x_old = x;
      Vector<Dtype> g_old = g;
      // STEP 2: compute the cauchy point
      Vector<Dtype> CauchyPoint = Matrix<Dtype>::Zero(DIM, 1), c = Matrix<Dtype>::Zero(DIM, 1);
      GetGeneralizedCauchyPoint(x, g, CauchyPoint, c);
      // STEP 3: compute a search direction d_k by the primal method for the sub-problem
      Vector<Dtype> SubspaceMin;
      SubspaceMinimization(CauchyPoint, x, c, g, SubspaceMin);
      // STEP 4: perform linesearch and STEP 5: compute gradient
      Dtype alpha_init = 1.0;
      const Dtype rate = MoreThuente<Dtype, decltype(objFunc), 1>::linesearch(x,  SubspaceMin-x ,  objFunc, alpha_init);
      // update current guess and function information
      x = x - rate*(x-SubspaceMin);
      f = objFunc.value(x);
      objFunc.gradient(x, g);
      xHistory.push_back(x);
      // prepare for next iteration
      Vector<Dtype> newY = g - g_old;
      Vector<Dtype> newS = x - x_old;
      // STEP 6: curvature condition — only accept the (s, y) pair if
      // |s^T y| is large enough relative to ||y||^2
      Dtype test = newS.dot(newY);
      test = (test < 0) ? -1.0 * test : test;
      if (test > 1e-7 * newY.squaredNorm()) {
        // keep at most settings_.m correction pairs (drop the oldest)
        if (k < this->settings_.m) {
          yHistory.conservativeResize(DIM, k + 1);
          sHistory.conservativeResize(DIM, k + 1);
        } else {
          yHistory.leftCols(this->settings_.m - 1) = yHistory.rightCols(this->settings_.m - 1).eval();
          sHistory.leftCols(this->settings_.m - 1) = sHistory.rightCols(this->settings_.m - 1).eval();
        }
        yHistory.rightCols(1) = newY;
        sHistory.rightCols(1) = newS;
        // STEP 7: rebuild the compact representation W, M and the scaling theta
        theta = (Dtype)(newY.transpose() * newY) / (newY.transpose() * newS);
        W = Matrix<Dtype>::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
        W << yHistory, (theta * sHistory);
        Matrix<Dtype> A = sHistory.transpose() * yHistory;
        Matrix<Dtype> L = A.template triangularView<Eigen::StrictlyLower>();
        Matrix<Dtype> MM(A.rows() + L.rows(), A.rows() + L.cols());
        Matrix<Dtype> D = -1 * A.diagonal().asDiagonal();
        MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
        M = MM.inverse();
      }
      Vector<Dtype> ttt = Matrix<Dtype>::Zero(1, 1);
      ttt(0) = f_old - f;
      if (ttt.norm() < 1e-8) {
        // successive function values too similar
        break;
      }
      k++;
    }
    x0 = x;
  }

};

} /* namespace cppoptlib */

#endif /* LBFGSBSOLVER_H_ */
for-5.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */

/* Checks that an orphaned `#pragma omp for schedule(guided)` worksharing
   loop is lowered to exactly one GOMP_loop_guided_start / _next pair in the
   ompexp tree dump (see the dg-final scans below). */

extern void bar(int);

void foo (int n)
{
  int i;

#pragma omp for schedule(guided)
  for (i = 0; i < n; ++i)
    bar(i);
}

/* { dg-final { scan-tree-dump-times "GOMP_loop_guided_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_guided_next" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
GB_binop__first_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__first_fc64 // A.*B function (eWiseMult): GB_AemultB__first_fc64 // A*D function (colscale): GB_AxD__first_fc64 // D*A function (rowscale): GB_DxB__first_fc64 // C+=B function (dense accum): GB_Cdense_accumB__first_fc64 // C+=b function (dense accum): GB_Cdense_accumb__first_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_fc64 // C=scalar+B GB_bind1st__first_fc64 // C=scalar+B' GB_bind1st_tran__first_fc64 // C=A+scalar (none) // C=A'+scalar (none) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax 
[pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = x ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FC64 || GxB_NO_FIRST_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__first_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__first_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__first_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__first_fc64 ( 
GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__first_fc64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__first_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t 
*pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__first_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__first_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary 
operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB_bind1st_tran__first_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
/* ==== file: GB_binop__second_int8.c (concatenation separator; kept as a comment so the translation unit stays syntactically valid) ==== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_int8 // A.*B function (eWiseMult): GB_AemultB__second_int8 // A*D function (colscale): GB_AxD__second_int8 // D*A function (rowscale): GB_DxB__second_int8 // C+=B function (dense accum): GB_Cdense_accumB__second_int8 // C+=b function (dense accum): GB_Cdense_accumb__second_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_int8 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_int8 // C=A'+scalar GB_bind2nd_tran__second_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = bij #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] 
#define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT 
pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B 
//------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__second_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" 
GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) 
( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ==== file: @mropes.nim.c (Nim compiler output for ropes.nim; concatenation separator kept as a comment so the translation unit stays syntactically valid) ==== */
/* Generated by Nim Compiler v1.0.10 */ /* (c) 2019 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 32 #include "nimbase.h" #include <string.h> #include <stdio.h> #undef LANGUAGE_C #undef MIPSEB #undef MIPSEL #undef PPC #undef R3000 #undef R4000 #undef i386 #undef linux #undef mips #undef near #undef far #undef powerpc #undef unix #define nimfr_(x, y) #define nimln_(x, y) typedef struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct RootObj RootObj; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg tySequence__WwUFq9cJ2xKRlsAWVEHyPRg; typedef struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g; typedef struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w; typedef struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ; typedef struct tyObject_GcStack__7fytPA5bBsob6See21YMRA tyObject_GcStack__7fytPA5bBsob6See21YMRA; typedef struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg; typedef struct tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ; typedef struct tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg; typedef struct tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw; typedef struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA; typedef struct tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw; typedef struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw; typedef struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg; 
typedef struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyTuple__ujsjpB2O9cjj3uDHsXbnSzg; typedef struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg; typedef struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ; typedef struct tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg; typedef tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* tyArray__USLYl0Lpkimm4FABiJ3ldA[4096]; typedef NU8 tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A; typedef NU8 tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ; typedef N_NIMCALL_PTR(void, tyProc__ojoeKfW4VYIm36I9cpDTQIg) (void* p, NI op); typedef N_NIMCALL_PTR(void*, tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ) (void* p); struct TNimType { NI size; tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A kind; tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ flags; TNimType* base; TNimNode* node; void* finalizer; tyProc__ojoeKfW4VYIm36I9cpDTQIg marker; tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ deepcopy; }; typedef NU8 tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ; struct TNimNode { tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; struct RootObj { TNimType* m_type; }; struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA { RootObj Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* left; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* right; NI L; NimStringDesc* data; }; typedef N_NIMCALL_PTR(void, tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ) (void); struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g { NI refcount; TNimType* typ; }; struct tyObject_GcStack__7fytPA5bBsob6See21YMRA { void* bottom; }; struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w { NI len; NI cap; tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g** d; }; typedef tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ* tyArray__SiRwrEKZdLgxqz9a9aoVBglg[512]; typedef NU32 
tyArray__BHbOSqU1t9b3Gt7K2c6fQig[24]; typedef tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* tyArray__N1u1nqOgmuJN9cSZrnMHgOQ[32]; typedef tyArray__N1u1nqOgmuJN9cSZrnMHgOQ tyArray__B6durA4ZCi1xjJvRtyYxMg[24]; typedef tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw* tyArray__lh2A89ahMmYg9bCmpVaplLbA[256]; struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA { tyArray__lh2A89ahMmYg9bCmpVaplLbA data; }; typedef tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* tyArray__0aOLqZchNi8nWtMTi8ND8w[2]; struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw { tyArray__0aOLqZchNi8nWtMTi8ND8w link; NI key; NI upperBound; NI level; }; struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg { tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* Field0; NI Field1; }; typedef tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyArray__LzOv2eCDGiceMKQstCLmhw[30]; struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg { NI len; tyArray__LzOv2eCDGiceMKQstCLmhw chunks; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg* next; }; struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg { NI minLargeObj; NI maxLargeObj; tyArray__SiRwrEKZdLgxqz9a9aoVBglg freeSmallChunks; NU32 flBitmap; tyArray__BHbOSqU1t9b3Gt7K2c6fQig slBitmap; tyArray__B6durA4ZCi1xjJvRtyYxMg matrix; tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw* llmem; NI currMem; NI maxMem; NI freeMem; NI occ; NI lastSize; tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA chunkStarts; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* root; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* deleted; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* last; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* freeAvlNodes; NIM_BOOL locked; NIM_BOOL blockChunkSizeIncrease; NI nextChunkSize; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw bottomData; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg heapLinks; }; struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg { NI stackScans; NI cycleCollections; NI maxThreshold; NI maxStackSize; NI maxStackCells; NI cycleTableSize; NI64 maxPause; }; struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ { NI counter; NI max; 
tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg* head; tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg** data; }; struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ { tyObject_GcStack__7fytPA5bBsob6See21YMRA stack; NI cycleThreshold; NI zctThreshold; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w zct; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w decStack; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tempStack; NI recGcLock; tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg region; tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg stat; tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ marked; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w additionalRoots; NI gcThreadId; }; typedef NU8 tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg; typedef NIM_CHAR tyArray__9bKy7UA2LOi2vzOViufaW1Q[1024]; struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg { TGenericSeq Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d, NI op); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op); static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ markerProc); N_NIMCALL(NimStringDesc*, mnewString)(NI len); N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a); N_NIMCALL(NimStringDesc*, setLengthStr)(NimStringDesc* s, NI newLen); N_NIMCALL(void*, newSeq)(TNimType* typ, NI len); static N_INLINE(void, asgnRef)(void** dest, void* src); static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr); static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); N_LIB_PRIVATE N_NOINLINE(void, 
addZCT__Y66tOYFjgwJ0k4aLz4bc0Q)(tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w* s, tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s); N_NIMCALL(TGenericSeq*, setLengthSeqV2)(TGenericSeq* s, TNimType* typ, NI newLen); N_NIMCALL(void, unsureAsgnRef)(void** dest, void* src); N_NIMCALL(TGenericSeq*, incrSeqV3)(TGenericSeq* s, TNimType* typ); static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src); static N_INLINE(void, copyMem__M04YC71iJg1N7gBF3HZTngsystem)(void* dest, void* source, NI size); static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest, NI addlen); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0); N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data); N_NIMCALL(void*, newObj)(TNimType* typ, NI size); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src); static N_INLINE(void, nimGCunrefNoCycle)(void* p); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s); N_LIB_PRIVATE 
N_NIMCALL(NI, hash__6PCYkKlCNhq9cnRLnqWKkwQ)(NimStringDesc* x); static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b); static N_INLINE(NIM_BOOL, equalMem__Bj9c9cm9cBM9coLsuNsT5BvZ9awsystem)(void* a, void* b, NI size); static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size); N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(void, failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A)(NimStringDesc* msg); N_NIMCALL(NimStringDesc*, rawNewString)(NI space); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, substr__2yh9cer0ymNRHlOOg8P7IuA)(NimStringDesc* s, NI first, NI last); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x); N_LIB_PRIVATE N_NIMCALL(void, write__PArlm09bKklm2BLsCg6YtaA)(FILE* f, NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__gq12VLhVO0NBzUTnGgz4nw)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f); static N_INLINE(void, nimZeroMem)(void* p, NI size); static N_INLINE(void, nimSetMem__JE6t4x7Z3v2iVz27Nx0MRAmemory)(void* a, int v, NI size); N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__f3MIZh4IV2qRTxlOpckbRA_2)(FILE* f, void* buffer, NI len); static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(void, close__fU6ZlJAtQ9bre04EDZLdGsA_3)(FILE* f); N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename); tyArray__USLYl0Lpkimm4FABiJ3ldA cache__WGMp5Wo1NlgbAMOysPIfmQ; extern TNimType 
NTI__ytyiCJqK439aF9cIibuRVpAg_; TNimType NTI__OFzf0kSiPTcNreUIeJgWVA_; extern TNimType NTI__rR5Bzr1D5krxoo1NcNyeMA_; extern TNimType NTI__77mFvmsOLKik79ci2hXkHEg_; TNimType NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_; TNimType NTI__USLYl0Lpkimm4FABiJ3ldA_; NI gCacheTries__5GfZTThHPBfB9bjRZdFluBw; NI gCacheMisses__fLRm9am8S0daYBVNK6JKyBg; NI gCacheIntTries__opyfsNv023Md1P05mqsDew; extern TNimType NTI__WwUFq9cJ2xKRlsAWVEHyPRg_; extern tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ gch__IcYaEuuWivYAS86vFMTS3Q; STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_4, "$", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_5, "ropes.nim(238, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_6, "ropes.nim(250, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_7, "ropes.nim(253, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_8, "\012", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_9, "ropes.nim(263, 18) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_10, "[$1, $2, $3]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_11, "FR_.len-=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_12, "} $1: ;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_13, "}$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_14, "FR_.len+=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_15, "void", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_16, ", ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_17, "$1 $2;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_18, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_19, "*", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_20, " ", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_21, ", NI $1Len_$2", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_22, " Result", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_23, "$1$2($3, $4)$5", 14); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_24, "(*$1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_25, "static TNimType* $1;$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_26, "\011$1 = (TNimType*)hcrGetGlobal($2, \"$1\");$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_27, "extern TNimType $1;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_28, "NTI$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_29, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_30, "$1.flags = $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_31, "$1.name = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_32, "$1.nextType = nimTypeRoot; nimTypeRoot=&$1;$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_33, "\011hcrRegisterGlobal($2, \"$1\", sizeof(TNimType), NULL, (void**)&$" "1);$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_34, "TNimType $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_35, "$1[$2]", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_36, "static TNimNode** $1;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_37, "\011hcrRegisterGlobal($3, \"$1\", sizeof(TNimNode*) * $2, NULL, (voi" "d**)&$1);$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_38, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_39, "$1[$2] = &$3;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_40, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_41, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_42, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_43, "$1.node = &$2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_44, "static N_NIMCALL(void, $1)(void* p, NI op)", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_45, "$1 a;$n", 7); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_46, "a = ($1)p;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_47, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_48, "($1 \? $1->$2 : 0)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_49, "$1.Sup", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_50, "#pragma pack(push, 1)$nstruct{", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_51, "};$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_52, "#pragma pack(pop)$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_53, "union{$n$1};$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_54, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_55, "$1 $2:$3;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_56, "switch ($1.$2) {$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_57, "case $1 ... $2:$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_58, "(-2147483647 -1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_59, "IL64($1)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_60, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_61, "NIM_TRUE", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_62, "NIM_FALSE", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_63, "(($1) $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_64, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_65, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_66, "static const struct {$n NI cap; void* allocator; NIM_CHAR data" "[$2+1];$n} $1 = { $2, NIM_NIL, $3 };$n", 101); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_67, "static const NimStringV2 $1 = {$2, (NimStrPayload*)&$3};$n", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_68, "case $1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_69, "default:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_70, "break;$n", 8); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_71, "} $n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_72, "$1.$2", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_73, "$1$3[$2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_74, "$1 {$n$2$3$4}\012", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_75, "$1;\012", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_76, "N_NIMCALL_PTR(void, $1)(void*, NI);\012", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_77, "\011$1 = (N_NIMCALL_PTR(void, )(void*, NI)) hcrRegisterProc($3, \"$" "1\", (void*)$2);\012", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_78, "$1.marker = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_79, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_80, "$1.offset = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_81, "NI $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_82, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_83, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_84, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_85, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_86, "$1.destructor = (void*)$2; $1.size = sizeof($3); $1.name = $4;$" "n", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_87, "NimDT_$1_$2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_88, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_89, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_90, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_91, 
"$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_92, "Result", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_93, "$N#line $2 $1$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_94, "struct {$1} GCFRAME_;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_95, "\011}BeforeRet_: ;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_96, "}$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_97, "\011$1 = ($3) hcrRegisterProc($4, \"$1\", (void*)$2);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_98, "$1(*)$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_99, "static void* $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_100, "\011$1 = ($2) ($3$4));$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_101, "$2 $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_102, "\011$1 = ($2) hcrRegisterProc($3, \"$1\", (void*)$1);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_103, "\011$1 = ($2) hcrGetProc($3, \"$1\");$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_104, " $1;$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_105, "\011$1 = ($2*)hcrGetGlobal($3, \"$1\");$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_106, "NIM_CHECK_SIZE($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_107, "typedef NI32 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_108, "typedef NU8 $1;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_109, "typedef NU16 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_110, "typedef NI64 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_111, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_112, "typedef struct {$nN_NIMCALL_PTR($2, ClP_0) $3;$nvoid* ClE_0;$n}" " $1;$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_113, "typedef $1 $2[1];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_114, "typedef $1 $2[$3];$n", 20); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_115, " {$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_116, "char dummy;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_117, "TY", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_118, "typedef $1 $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_119, "$1 $2 {$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_120, "$1 Field$2;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_121, "typedef NU$2 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_122, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_123, "Field$1", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_124, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_125, ",$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_126, "{$1, ($2*)&$3}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_127, "{{$1, $1 | NIM_STRLIT_FLAG}", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_128, "(($1)&$2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_129, "{NIM_NIL,NIM_NIL}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_130, "{(($1) $2),NIM_NIL}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_131, "$1,$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_132, "$1", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_133, "{{$1}}", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_134, "{$1}$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_135, "{$1, (NimStrPayload*)&$2}", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_136, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_137, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_138, "$2* $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_139, "\011NimThreadVars* NimTV_;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_140, "static N_NIMCALL(void, $1)(void)", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_141, "$1 {$n$2$3$4}$n", 15); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_142, "$1;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_143, "//", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_144, "$#;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_145, "$#($#);$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_146, "$# = $#;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_147, "NULL", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_148, "((NU8)($1))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_149, "($4*)(($1)+($2)), ($3)-($2)+1", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_150, "($5*)($1)+(($2)-($4)), ($3)-($2)+1", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_151, "($4*)($1)+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_152, "($5*)(*$1)$4+($2), ($3)-($2)+1", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_153, "($5*)$1$4+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_154, "$1, $1Len_0", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_155, "(*$1)$3, $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_156, "$1$3, $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_157, "$1, $2", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_158, "$1.ClP_0($3$1.ClE_0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_159, "$1.ClE_0\? $1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2);$n", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_160, "$1.ClP_0($3$1.ClE_0)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_161, "$1.ClE_0\? 
$1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_162, "(", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_163, ")", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_164, ";$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_165, ");$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_166, "[", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_167, ": ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_168, "Result: ", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_169, "];$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_170, "]", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_171, "if ($1) goto $2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_172, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_173, "$1: ;$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_174, "!($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_175, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_176, "-($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_177, "((NI$2)-($1))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_178, "($1 > 0\? 
($1) : -($1))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_179, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_180, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_181, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_182, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_183, "($4)((NU$5)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_184, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_185, "($4)((NI$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_186, "($4)($1 & $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_187, "($4)($1 | $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_188, "($4)($1 ^ $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_189, "(($1 <= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_190, "(($1 >= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_191, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_192, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_193, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_194, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_195, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_196, "($1 == $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_197, "($1 <= $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_198, "($1 < $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_199, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_200, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_201, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_202, "((NU64)($1) < (NU64)($2))", 25); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_203, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_204, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_205, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_206, "($1 != $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_207, "($1.ClP_0 == $2.ClP_0 && $1.ClE_0 == $2.ClE_0)", 46); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_208, "($1)($2 $3 $4)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_209, "($#)($#)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_210, ".Sup", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_211, "$1.m_type == $2", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_212, "static TNimType* $#[2];$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_213, "sizeof($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_214, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_215, "((NI)sizeof($1))", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_216, "((NI)alignof($1))", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_217, "((NI)offsetof($1, $2))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_218, "(*($1*) ($2))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_219, "(($1) ($2))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_220, "(($1) (ptrdiff_t) ($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_221, "(*($1*) (&$2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_222, "($1-1)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_223, "$1 |= ((NU8)1)<<(($2) & 7);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_224, "($1- $2)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_225, "$1 |= ((NU16)1)<<(($2) & 15);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_226, "$1 |= ((NU32)1)<<(($2) & 31);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_227, "$1 |= ((NU64)1)<<(($2) & 63);$n", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_228, "$1 &= ~(((NU8)1) << (($2) & 7));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_229, "$1 &= ~(((NU16)1) << (($2) & 15));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_230, "$1 &= ~(((NU32)1) << (($2) & 31));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_231, "$1 &= ~(((NU64)1) << (($2) & 63));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_232, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_233, "$1 == $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_234, "(($1 &((NU8)1<<((NU)($2)&7U)))!=0)", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_235, "(($1 &((NU16)1<<((NU)($2)&15U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_236, "(($1 &((NU32)1<<((NU)($2)&31U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_237, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_238, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_239, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_240, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_241, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_242, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_243, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_244, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_245, "$1 = 0;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_246, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=(($5)(1)<<(($1)%(sizeof($5" ")*8)));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_247, "$1 |=(($3)(1)<<(($2)%(sizeof($3)*8)));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_248, "$1.Field$2", 10); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_249, "LOC$1.source", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_250, "LOC$#.dest", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_251, ".Field$1", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_252, ".$1", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_253, "TFrame $1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_254, "if (!$1) goto $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_255, "goto $1;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_256, "TMP$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_257, "static void* $#[$#] = {", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_258, "&&TMP$#_, ", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_259, "&&TMP$#_};$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_260, "goto *$#[$#];$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_261, "TMP$#_:$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_262, "case $1: $n$2break;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_263, "goto LA$1_;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_264, "LA$1_: ;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_265, "NIMSTATE_$#:$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_266, "switch ($1) {$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_267, "default: __assume(0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_268, "goto BeforeRet_;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_269, "throw;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_270, "else", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_271, "throw $1;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_272, "$n#pragma omp $4$nfor ($1 = $2; $1 <= $3; ++$1)", 47); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_273, "$n#pragma omp $5$nfor ($1 = $2; $1 <= $3; $1 += $4)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_274, "case -1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_275, " goto BeforeRet_;$n", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_276, "case $2: goto $1$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_277, "(((NI*) $1)[1] < 0)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_278, "((((NI*) $1.ClE_0)[1]) < 0)", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_279, "$1 N_NIMCALL(void, $2)(void) {$N", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_280, "\011int* nim_hcr_dummy_ = 0;$n\011NIM_BOOL nim_hcr_do_init_ = hcrRegi" "sterGlobal($1, \"module_initialized_\", 1, NULL, (void**)&nim_hcr_" "dummy_);$n", 137); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_281, "{$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_282, "\011TFrame FR_; FR_.len = 0;$N", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_283, "}$N$N", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_284, "N_LIB_EXPORT N_NIMCALL(void, $1)(void* handle, N_NIMCALL_PTR(vo" "id*, getProcAddr)(void*, char*)) {$N", 99); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_285, "static $2 $1;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_286, "\011$1 = ($2) $3($4, $5);$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_287, "NIM_EXTERNC N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_288, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void) {$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_289, "$nN_LIB_PRIVATE const char* hcr_module_list[] = {$n", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_290, "\011$1,$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_291, "\011\"\"};$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_292, "$nN_LIB_EXPORT N_NIMCALL(void**, HcrGetImportedModules)() { ret" "urn (void**)hcr_module_list; }$n", 95); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_293, "$nN_LIB_EXPORT N_NIMCALL(char*, HcrGetSigHash)() { return \"$1\";" " }$n$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_294, "static void* hcr_handle;$N", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_295, "N_LIB_EXPORT 
N_NIMCALL(void, $1)(void);$N", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_296, "N_LIB_EXPORT N_NIMCALL(void, $1)(void*, N_NIMCALL_PTR(void*, ge" "tProcAddr)(void*, char*));$N", 91); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_297, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void);$N", 57); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_298, "\011$1();$N", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_299, "\011hcrInit((void**)hcr_module_list, $1, $2, $3, hcr_handle, nimGe" "tProcAddr);$n", 76); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_300, "\011$1(hcr_handle, nimGetProcAddr);$N", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_301, "\011hcrAddModule($1);\012", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_302, "\011HcrCreateTypeInfos();$N", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_303, "\011hcrRegisterGlobal($1, \"cmdCount\", sizeof(cmd_count), NULL, (vo" "id**)&cmd_count);$N", 82); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_304, "\011hcrRegisterGlobal($1, \"cmdLine\", sizeof(cmd_line), NULL, (void" "**)&cmd_line);$N", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_305, "N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_306, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_307, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_308, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. 
" "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_309, "#define NIM_INTBITS $1\012", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_310, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_311, "#include \"$1\"$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_312, "#include $1$N", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_313, "--file:r\"$1\"$N", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_314, "\012[Symbols]$n$1", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_315, "/* Generated by Nim Compiler v$1 */$N/* (c) 2017 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_316, "__$1__", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_317, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_318, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_319, "#endif /* $1 */$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_320, "var F={procname:$1,prev:framePtr,filename:$2,line:0};$n", 55); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_321, "framePtr = F;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_322, "var $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_323, "if ($1 == undefined) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_324, "if ($1 === undefined) {$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_325, "var $1 = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_326, "var $1_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_327, "[$1]", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_328, "new $1($2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_329, "var $# = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_330, "var $#_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_331, "var $# = $#;$n", 
14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_332, "return [$#, $#];$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_333, "return $#;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_334, "BeforeRet: do {$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_335, "} while (false);$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_336, "try {$n$1} catch (e) {$n alert(\"Unhandled exception:\\n\" + e.mes" "sage + \"\\n\"$n}", 77); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_337, "function $#() { return $#.apply(this, arguments); }$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_338, "function $#($#) {$n$#$#$#$#$#", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_339, "arrayConstr($1, $2, $3)", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_340, "NTI$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_341, "var $1 = {size: 0,kind: $2,base: null,node: null,finalizer: nul" "l};$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_342, "$1.base = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_343, "\"$1\": {kind: 1, offset: $1, typ: $2, name: $3, len: 0, sons: nu" "ll}", 66); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_344, "var NNI$1 = {kind: 2, offset: 0, typ: null, name: null, len: $2" ", sons: {$3}};$n", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_345, "var $1 = {size: 0, kind: $2, base: null, node: null, finalizer:" " null};$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_346, "$1.node = NNI$2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_347, "var NNI$1 = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_348, "{kind: 2, len: $1, offset: 0, typ: null, name: null, sons: [$2]" "}", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_349, "{kind: 1, offset: \"$1\", len: 0, typ: $2, name: $3, sons: null}", 62); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_350, "[$1, $2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_351, "[setConstr($1), $2]", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_352, "{kind: 3, offset: \"$1\", len: $3, typ: $2, name: $4, sons: [$5]}", 63); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_353, "{kind: 1, offset: \"Field$1\", len: 0, typ: $2, name: \"Field$1\", " "sons: null}", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_354, "Field$1: $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_355, "m_type: $1", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_356, "$#: ", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_357, "({$1})", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_358, "nimCopy(null, $1, $2)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_359, "Tmp$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_360, "var $1 = $2, $3 = $1[0], $3_Idx = $1[1];$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_361, "$1 = nimCopy(null, $1, $2);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_362, "$1[0][0]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_363, "$1[0][1]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_364, "$1[0]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_365, "$1[1]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_366, "makeNimstrLit($1)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_367, "// line $2 \"$1\"$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_368, "F.line = $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_369, "($1 || $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_370, "if ($1) $2 = true; else {", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_371, "$2 = $1;", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_372, "($1 && $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_373, "if (!$1) $2 = false; else {", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_374, "$1[0][$1[1]]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_375, "($1 = $2, $1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_376, "$1 = (($5 $2 $3) $4)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_377, "(($1 
$2 $3) $4)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_378, "addInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_379, "($1 + $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_380, "subInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_381, "($1 - $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_382, "mulInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_383, "($1 * $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_384, "divInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_385, "Math.trunc($1 / $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_386, "modInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_387, "Math.trunc($1 % $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_388, "($1 / $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_389, "($1 << $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_390, "($1 >> $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_391, "($1 & $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_392, "($1 | $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_393, "($1 ^ $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_394, "nimMin($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_395, "nimMax($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_396, "($1 % $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_397, "negInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_398, "negInt64($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_399, "absInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_400, "Math.abs($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_401, "+($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_402, "~($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_403, "nimCharToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_404, "nimBoolToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_405, "cstrToNimstr(($1)+\"\")", 21); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_406, "cstrToNimstr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_407, "(($1 $2) >>> $3)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_408, "($# == $# && $# == $#)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_409, "var $1 = $2; $2 = $3; $3 = $1;$n", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_410, "var $1 = $2; $2 = $3; $3 = $1;", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_411, "$1 - 1", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_412, "subInt($1, 1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_413, "if ($1 != null) { addChar($3, $2); } else { $3 = [$2]; }", 56); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_414, "if ($1 != null) { $4 += $2; } else { $4 = $2$3; }", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_415, ".slice()", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_416, "if ($1 != null) { $4 = ($4).concat($2); } else { $4 = $2$3; }", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_417, "if ($1 != null) { $3.push($2); } else { $3 = [$2]; }", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_418, "var $1 = nimCopy(null, $2, $3);$n", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_419, "[$1].concat(", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_420, "($1 || []).concat(", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_421, "[$1],", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_422, "$1 || [],", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_423, "[$1])", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_424, "$1 || [])", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_425, "eqStrings($1, $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_426, "(cmpStrings($1, $2) <= 0)", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_427, "(cmpStrings($1, $2) < 0)", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_428, "($1 == null)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_429, "($# == null && $# === 0)", 24); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_430, "$1 = $2;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_431, "$1 = [$3]; $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_432, "$1 = [[$2], 0];$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_433, "($1 \? 1:0)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_434, "($1 != null \? $2.length : 0)", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_435, "$1.length", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_436, "($1 != null \? ($2.length-1) : -1)", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_437, "$1 += $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_438, "$1 = addInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_439, "$1 -= $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_440, "$1 = subInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_441, "($1 == null \? $3 = mnewString($2) : $3.length = $2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_442, "if ($1 === null) $4 = [];\012 if ($4.length < $2) { " "for (var i=$4.length;i<$5;++i) $4.push($3); }\012 els" "e { $4.length = $5; }", 148); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_443, "SetCard($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_444, "SetLt($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_445, "SetLe($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_446, "SetEq($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_447, "SetMul($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_448, "SetPlus($1, $2)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_449, "SetMinus($1, $2)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_450, "$1[$2] = true", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_451, "delete $1[$2]", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_452, "($1[$2] != undefined)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_453, "$1 = new Array($2); for (var i=0;i<$2;++i) {$1[i]=$3;}", 54); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_454, "[]", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_455, "($1.m_type == $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_456, "isObj($1.m_type, $2)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_457, "$1 = null, $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_458, "$1 = genericReset($3, $2);$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_459, "($1.slice($2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_460, "mnewString($1)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_461, "mnewString(0)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_462, "($1 = $2, $1[0]), $1[1]", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_463, "($1 = $2, $1)[0]", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_464, "($1.slice($2, $3+1))", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_465, "var $1 = $2;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_466, "Field$#: [$#, $#]", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_467, "Field$#: $#", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_468, "$#: [$#, $#]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_469, "$#: $#", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_470, "{$1}", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_471, "(!!($1))", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_472, "(($1)|0)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_473, "if ($1[$2.$3]$4undefined) { raiseFieldError(makeNimstrLit($5));" " }$n", 67); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_474, "!==", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_475, "===", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_476, "chckIndx($1, $2, ($3 != null \? 
$3.length : 0)+$2-1)-$2", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_477, "($1)-$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_478, "$1.charCodeAt($2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_479, "($1 $2)", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_480, "($1|0)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_481, "($1 - ($2 $3))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_482, "null", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_483, "chckRange($1, $2, $3)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_484, "toJSStr($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_485, "L$1: do {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_486, "} while(false);$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_487, "else {$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_488, "if ($1) {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_489, "L$1: while (true) {$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_490, "if (!$1) break L$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_491, "switch (toJSStr($1)) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_492, "default: $n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_493, "break BeforeRet;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_494, "break L$1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_495, "$1 = nimCopy(null, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_496, "nimCopy($1, $2, $3);$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_497, "var $1 = $4; $2 = $1[0]; $3 = $1[1];$n", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_498, "$# = [$#, $#];$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_499, "$1 = $2; $3 = $4;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_500, "try {$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_501, "--excHandler;$n} catch (EXC) {$n var prevJSError = lastJSError;" "$n lastJSError = EXC;$n --excHandler;$n", 102); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_502, "framePtr = $1;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_503, "lastJSError instanceof $1", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_504, "isObj(lastJSError.m_type, $1)", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_505, "if (lastJSError && ($1)) {$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_506, "var $1 = lastJSError;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_507, "lastJSError = prevJSError;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_508, "raiseException($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_509, "$1 = true;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_510, "/* Generated by the Nim Compiler v$1 */$n/* (c) 2019 Andreas " "Rumpf */$n$nvar framePtr = null;$nvar excHandler = 0;$nvar lastJ" "SError = null;$nif (typeof Int8Array === \'undefined\') Int8Array " "= Array;$nif (typeof Int16Array === \'undefined\') Int16Array = Ar" "ray;$nif (typeof Int32Array === \'undefined\') Int32Array = Array;" "$nif (typeof Uint8Array === \'undefined\') Uint8Array = Array;$nif" " (typeof Uint16Array === \'undefined\') Uint16Array = Array;$nif (" "typeof Uint32Array === \'undefined\') Uint32Array = Array;$nif (ty" "peof Float32Array === \'undefined\') Float32Array = Array;$nif (ty" "peof Float64Array === \'undefined\') Float64Array = Array;$n", 633); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_511, "Deprecated", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_512, "Deprecated:", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_513, "\012<p><strong class=\"examples_text\">$1</strong></p>\012", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_514, "\012\\textbf{$1}\012", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_515, "<span class=\"Comment\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_516, "\\spanComment{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_517, "<span class=\"Keyword\">$1</span>", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_518, "\\spanKeyword{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_519, "<span class=\"Operator\">$1</span>", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_520, "\\spanOperator{$1}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_521, "<span class=\"StringLit\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_522, "\\spanStringLit{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_523, "<span class=\"CharLit\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_524, "\\spanCharLit{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_525, "<span class=\"DecNumber\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_526, "\\spanDecNumber{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_527, "<span class=\"FloatNumber\">$1</span>", 35); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_528, "\\spanFloatNumber{$1}", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_529, "<a href=\"#$2\"><span class=\"Identifier\">$1</span></a>", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_530, "\\spanIdentifier{$1}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_531, "<a href=\"$1#$2\"><span class=\"Identifier\">$3</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_532, "<span class=\"Identifier\">$1</span>", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_533, "<span><span class=\"Other\">{</span><span class=\"Other pragmadots" "\">...</span><span class=\"Other\">}</span></span><span class=\"prag" "mawrap\"><span class=\"Other\">$1</span><span class=\"pragma\">", 185); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_534, "\\spanOther{$1}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_535, "</span><span class=\"Other\">$1</span></span>", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_536, "<span class=\"Other\">$1</span>", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_537, "<a class=\"reference external\" href=\"$2\">$1</a>", 46); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_538, "<a href=\"$2#$1\"><span class=\"Identifier\">$1</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_539, "$1 -> \"$2\";$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_540, "digraph $1 {$n$2}$n", 19); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a; a = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)p; nimGCvisit((void*)(*a).left, op); nimGCvisit((void*)(*a).right, op); nimGCvisit((void*)(*a).data, op); } static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void) { NI T1_; T1_ = (NI)0; for (T1_ = 0; T1_ < 4096; T1_++) { nimGCvisit((void*)cache__WGMp5Wo1NlgbAMOysPIfmQ[T1_], 0); } } N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a) { NI result; result = (NI)0; { if (!(a == NIM_NIL)) goto LA3_; result = ((NI) 0); } goto LA1_; LA3_: ; { result = ((*a).L > 0? ((*a).L) : -((*a).L)); } LA1_: ; return result; } static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU32)((*c).refcount) + (NU32)(((NI) 8))); } static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* result; result = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; result = ((tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*) ((NI)((NU32)(((NI) (ptrdiff_t) (usr))) - (NU32)(((NI) 8))))); return result; } static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { addZCT__Y66tOYFjgwJ0k4aLz4bc0Q((&gch__IcYaEuuWivYAS86vFMTS3Q.zct), c); } static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU32)((*c).refcount) - (NU32)(((NI) 8))); { if (!((NU32)((*c).refcount) < (NU32)(((NI) 8)))) goto LA3_; rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system(c); } LA3_: 
; } static N_INLINE(void, asgnRef)(void** dest, void* src) { { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T5_; if (!!((src == NIM_NIL))) goto LA3_; T5_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T5_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(src); incRef__AT1eRuflKWyTTBdLjEDZbg_3system(T5_); } LA3_: ; { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T10_; if (!!(((*dest) == NIM_NIL))) goto LA8_; T10_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T10_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem((*dest)); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T10_); } LA8_: ; (*dest) = src; } static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI L; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = ((*s) ? (*s)->Sup.len : 0); L = (NI)(T1_ - ((NI) 1)); result = (*s)->data[L]; unsureAsgnRef((void**) (&(*s)), (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) setLengthSeqV2(&((*s))->Sup, (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), ((NI) (L)))); return result; } static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size) { void* T1_; T1_ = (void*)0; T1_ = memcpy(dest, source, ((size_t) (size))); } static N_INLINE(void, copyMem__M04YC71iJg1N7gBF3HZTngsystem)(void* dest, void* source, NI size) { nimCopyMem(dest, source, size); } static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src) { { if (!!((src == NIM_NIL))) goto LA3_; copyMem__M04YC71iJg1N7gBF3HZTngsystem(((void*) ((&(*dest).data[(*dest).Sup.len]))), ((void*) ((*src).data)), ((NI) ((NI)((*src).Sup.len + ((NI) 1))))); (*dest).Sup.len += (*src).Sup.len; } LA3_: ; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, dollar___mZ66tEveFIQokq3arf8Klw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { NimStringDesc* result; NI T1_; result = (NimStringDesc*)0; T1_ = (NI)0; T1_ = len__9b0YRltzV3kNSE9aQTsG82wg(r); result = mnewString(((NI) (T1_))); result = setLengthStr(result, ((NI) 0)); { NimStringDesc* 
s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA5_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T9_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T9_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T9_)) goto LA8; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T12_; if (!!(((*it).left == NIM_NIL))) goto LA11; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T12_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T12_]), (*it).right); it = (*it).left; } LA11: ; } s = (*it).data; result = resizeString(result, (s ? s->Sup.len : 0) + 0); appendString(result, s); } LA8: ; } } LA5_: ; } return result; } static N_INLINE(void, nimGCunrefNoCycle)(void* p) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T1_; T1_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T1_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(p); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T1_); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*) newObj((&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_), sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA)); (*result).Sup.m_type = (&NTI__OFzf0kSiPTcNreUIeJgWVA_); (*result).L = ((NI32)-((data ? 
data->Sup.len : 0))); T1_ = (NimStringDesc*)0; T1_ = (*result).data; (*result).data = copyStringRC1(data); if (T1_) nimGCunrefNoCycle(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!(a == NIM_NIL)) goto LA3_; result = b; } goto LA1_; LA3_: ; { if (!(b == NIM_NIL)) goto LA6_; result = a; } goto LA1_; LA6_: ; { result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(((NimStringDesc*) NIM_NIL)); (*result).L = (NI)(((*a).L > 0? ((*a).L) : -((*a).L)) + ((*b).L > 0? ((*b).L) : -((*b).L))); asgnRef((void**) (&(*result).left), a); asgnRef((void**) (&(*result).right), b); } LA1_: ; return result; } static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size) { int result; result = (int)0; result = memcmp(a, b, ((size_t) (size))); return result; } static N_INLINE(NIM_BOOL, equalMem__Bj9c9cm9cBM9coLsuNsT5BvZ9awsystem)(void* a, void* b, NI size) { NIM_BOOL result; int T1_; result = (NIM_BOOL)0; T1_ = (int)0; T1_ = nimCmpMem(a, b, size); result = (T1_ == ((NI32) 0)); return result; } static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b) { NIM_BOOL result; NI alen; NI blen; { result = (NIM_BOOL)0; alen = (a ? a->Sup.len : 0); blen = (b ? 
b->Sup.len : 0); { if (!(alen == blen)) goto LA3_; { if (!(alen == ((NI) 0))) goto LA7_; result = NIM_TRUE; goto BeforeRet_; } LA7_: ; result = equalMem__Bj9c9cm9cBM9coLsuNsT5BvZ9awsystem(((void*) ((&a->data[((NI) 0)]))), ((void*) ((&b->data[((NI) 0)]))), ((NI) (alen))); goto BeforeRet_; } LA3_: ; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI h; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheTries__5GfZTThHPBfB9bjRZdFluBw += ((NI) 1); T1_ = (NI)0; T1_ = hash__6PCYkKlCNhq9cnRLnqWKkwQ(s); h = (NI)(T1_ & ((NI) 4095)); result = cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]; { NIM_BOOL T4_; T4_ = (NIM_BOOL)0; T4_ = (result == 0); if (T4_) goto LA5_; T4_ = !(eqStrings((*result).data, s)); LA5_: ; if (!T4_) goto LA6_; gCacheMisses__fLRm9am8S0daYBVNK6JKyBg += ((NI) 1); result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(s); asgnRef((void**) (&cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]), result); } LA6_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!((s ? 
s->Sup.len : 0) == ((NI) 0))) goto LA3_; result = NIM_NIL; } goto LA1_; LA3_: ; { result = insertInCache__yShmEg9cffWxI7s5XzEKBow_2(s); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(b); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(a, T1_); return result; } N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b) { unsureAsgnRef((void**) (&(*a)), amp___Z7W1o5nPSc3ExfO5f7j1Gg((*a), b)); } N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ((*a), b)); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI i; NI length; NI num; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; i = ((NI) 0); length = (frmt ? frmt->Sup.len : 0); result = NIM_NIL; num = ((NI) 0); { while (1) { NI start; if (!(i < length)) goto LA2; { if (!((NU8)(frmt->data[i]) == (NU8)(36))) goto LA5_; i += ((NI) 1); switch (((NU8)(frmt->data[i]))) { case 36: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_4)); i += ((NI) 1); } break; case 35: { i += ((NI) 1); add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[num]); num += ((NI) 1); } break; case 48 ... 
57: { NI j; j = ((NI) 0); { while (1) { j = (NI)((NI)((NI)(j * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); { NIM_BOOL T14_; T14_ = (NIM_BOOL)0; T14_ = ((frmt ? frmt->Sup.len : 0) <= i); if (T14_) goto LA15_; T14_ = !((((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))); LA15_: ; if (!T14_) goto LA16_; goto LA10; } LA16_: ; } } LA10: ; num = j; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j)) goto LA20_; { NimStringDesc* T26_; if (!NIM_TRUE) goto LA24_; T26_ = (NimStringDesc*)0; T26_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T26_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_5)); appendString(T26_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T26_); } LA24_: ; } goto LA18_; LA20_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j - ((NI) 1))]); } LA18_: ; } break; case 123: { NI j_2; i += ((NI) 1); j_2 = ((NI) 0); { while (1) { if (!(((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))) goto LA30; j_2 = (NI)((NI)((NI)(j_2 * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); } LA30: ; } num = j_2; { if (!((NU8)(frmt->data[i]) == (NU8)(125))) goto LA33_; i += ((NI) 1); } goto LA31_; LA33_: ; { { NimStringDesc* T40_; if (!NIM_TRUE) goto LA38_; T40_ = (NimStringDesc*)0; T40_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T40_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_6)); appendString(T40_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T40_); } LA38_: ; } LA31_: ; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j_2)) goto LA43_; { NimStringDesc* T49_; if (!NIM_TRUE) goto LA47_; T49_ = (NimStringDesc*)0; T49_ = rawNewString((frmt ? 
frmt->Sup.len : 0) + 50); appendString(T49_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_7)); appendString(T49_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T49_); } LA47_: ; } goto LA41_; LA43_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j_2 - ((NI) 1))]); } LA41_: ; } break; case 110: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; case 78: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; default: { { NimStringDesc* T58_; if (!NIM_TRUE) goto LA56_; T58_ = (NimStringDesc*)0; T58_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T58_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_9)); appendString(T58_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T58_); } LA56_: ; } break; } } LA5_: ; start = i; { while (1) { if (!(i < length)) goto LA60; { if (!!(((NU8)(frmt->data[i]) == (NU8)(36)))) goto LA63_; i += ((NI) 1); } goto LA61_; LA63_: ; { goto LA59; } LA61_: ; } LA60: ; } LA59: ; { NimStringDesc* T70_; if (!(start <= (NI)(i - ((NI) 1)))) goto LA68_; T70_ = (NimStringDesc*)0; T70_ = substr__2yh9cer0ymNRHlOOg8P7IuA(frmt, start, (NI)(i - ((NI) 1))); add__yG4AKzsBRS1W4MANDlXQeg(&result, T70_); } LA68_: ; } LA2: ; } return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQfMnMPks8jKz20fTXQy9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_10), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__KOisMGxcPhz6CcSmxgwEQQ)(NI64 i) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheIntTries__opyfsNv023Md1P05mqsDew 
+= ((NI) 1); T1_ = (NimStringDesc*)0; T1_ = nimInt64ToStr(i); result = rope__yShmEg9cffWxI7s5XzEKBow(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KxpxlR6eqq3gRIOYTfR67w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_11), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IFeEbVhQpPGgxkLehuSiBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_12), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYiowJAm8zF7RBRISElaLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_13), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZkZcMxwzInnijXy5kz1K3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_14), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(void, prepend__IM4kcMNkkOLJtqdEqSxR8A_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(b, (*a))); } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___4cYKitaHx6RQ9azRtQsZp6w)(NimStringDesc* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(a); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(T1_, b); return result; } N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; write__PArlm09bKklm2BLsCg6YtaA(f, s); } LA7: ; } } LA4_: ; } } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9aA37gQrW88KHzpCAwhgjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_15), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PoDv5ydEvGdd9aiIF9cOiAPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_16), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vzbf0XksfaFTXNoTT6BCwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_17), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQVSDPkAFXHNoa1N7jYrNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_18), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6d8an6hdqiIrRjPW1wEh5Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_19), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gMbiWAc0IjihIq46IYhmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_20), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___uHsu7fLXac4OhMNd79bSJwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_21), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3WM9b4PeyDKoIDFMvYcQX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_22), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p4LhaCxKpUERrq9cB9b8Mp9cw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_23), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TbMwXzwNL7txOQADiTjwKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_24), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E0nDsXp7tY4mC1BnrrjWmA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_25), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mbjeaBETPixw9bUvyk31B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_26), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AfR9bXoD9bcehKoM7F8O79bYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_27), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nlZFDYB4M9bmBbYqEropRVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_28), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dwsIkeXQe0E8HKrzN9aRE5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_29), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fIR1FG0QPRsKvEYKq4tJUQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_30), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jADQs38xm62v1oxF2cSvEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_31), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DZV83DjWnQ9a19atC2oeswXg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_32), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sfvTjNjtOC86mU9bHczF6ow)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_33), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9ab1aKSDn70Vte0NcIItnaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_34), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jadqNPnY9aM3oxYK6jarLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_35), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LvsIDF8olc08xBiqCYIUog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_36), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6Tfa1iP1ENVlWbe89cSELSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_37), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hKg2Id9cvzE5Dgl9cU31c4Vw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_38), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H3xXuIFdbz4MNb5T6BSfcQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_39), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ELXFo0GedkhGYj9bocTHZAg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_40), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aLrcjgzGJE3f9ab2uR37jog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_41), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Q9c5iS9btBDBXZVoQktb1XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_42), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MALQXTKXJv7x9a9c247satLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_43), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0nBiBCva6YS9a9bSV2Vr7Zxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_44), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yyhPPkMkLJqWG6p8HGn9aoA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_45), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t8gRNGR1flvaCNlBxuLn1A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_46), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xQaqlAwFuwxqBFixw7ewLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_47), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2SWcbuU7RHQR0b8y9aJ9a5VQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_48), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gSgutt9b7GMWVGBkCt0UHAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_49), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Vcuq0AWiVDndx4UH9cJ9cBRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_50), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l4wxq9cmPihXoF5xnDVNR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_51), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zgEKWXsZtT6lqQ6XlgfrsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_52), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uXZ30k0oJEqGPZW57O3dwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_53), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tTI9aMQiBZdiEeBIVh7QtYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_54), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VJBBlA9aMl5p0yYB1WzSMVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_55), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jw4Sb0OSpKH1T5cLz7iyzA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_56), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0RQ2PINB4t8FjFlNUM6N9cQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_57), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LQ9bGxpANW8yeg5P9c0UYAaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_58), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8tdlskieCnWysl9c9blzqZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_59), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KbFpNe1pZ7hIuQi7dp1dSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_60), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nunbo9aB0HmmYQJ3InIBEzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_61), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RBxLok7DyUB0aHl9bxPIl9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_62), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NARRjCd1x5Fr7NTTcoPRrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_63), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NlLLwmZHOiJUpZfuk00AWA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_64), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mF9aI9b3hDjj53TD2C2gTrHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_65), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PafMws9cJ9arr9a0RVMoIHmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_66), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lAlmrWiRqEg9a9cd9a8kNhig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_67), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8NIixSwWrk6SXQ3BFamWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_68), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TTRh79a14hh1gb0owIP1Y6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_69), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmeCjGna9cPfiHHcfqmKXjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_70), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FsfRVuOOBePjn9cQ9aK7Vh1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_71), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___paA0sar8RKZqiwEaDfWo2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_72), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jr9cXNQhhlLDfFJH4RSjeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_73), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EnzikEr9bDhOR6GYxWuYSwQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_74), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QqzUiJcAEZE2azDhIWHrgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_75), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___20ZujjIFPkyqvS2OmenEAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_76), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vxo9ayk1xB18if39aZ1TBnKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_77), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NtQEfuK9bXszNTfYU57z19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_78), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___AKNexo4CH8G2vDeWW34Vpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_79), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LE3oWAmB5YDSDHm3LNHhCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_80), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W83I2xs7lC32PrMs9bq4P2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_81), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JKMGBJtXtDvc0NwxujFmZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_82), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TA8WFV49atYpIneJatQWALw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_83), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nPenDL3j2Q6A1an1Cl3oCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_84), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TNkzce2Sd9bck2QRtketc8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_85), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kqRXw2WRJqDnfQK0N30ydw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_86), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BKnrQUIV2xGn2MO0RK09aUw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_87), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SCyrk9acEm3vLZhXCV1fGNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_88), 
args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___erDe9aYc2BNxzH9brKlmtEBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_89), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HSAgkeH84eiEd8MfKIuBQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_90), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1AD3Wp47Hcdfg6PO2ac0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_91), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T11tCz9bIGT2CcftAwrDXZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_92), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lS9bA1j3Ue6pp7sCliDsT8g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_93), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M3h9cTlVBrj2vakKBqQRlMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_94), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BBAyGuVoK6QA7nXfPUIYKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_95), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___g9b9arp3BWCGRHDe21SJso6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_96), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___09aVguRR64dWfw4b6fKBcqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_97), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tgUnLdPVK0vRqC0pWxMClQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_98), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FBNsdfF5FNrY4P9cYQIfvZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_99), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cB7zULPbG5vWWdCukRjdqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_100), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dpzmcz9a6kXbhFacdElIMOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_101), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AWFBEodxoi9a61KDUc9aiw1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_102), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vHbYzYlzLPcurSm0Hu8InQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_103), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nzT6Rke9c7tkW9b3XMmld2LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_104), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cCc2iMcL3MEBZTTL3LCW1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_105), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ahBYcGrhpPvM5dTdzCQBrQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_106), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XI9awM9a9aQ9cB9bcS7uDRsa1Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_107), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___9cWNaGuyEpBbdBlD9b5nY1ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_108), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6P67I9czJ9aa9aZzVyYWUiGlw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_109), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S4jE5dFDtcCC8ODzxaJk6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_110), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Msid9awGKVeVe7p3v7WfNQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_111), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xyRsdWsGY1DVVispyn0Xeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_112), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EPABzhs2B9atAvHV4CUTw2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_113), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MhCcipNmSHgcDtN4cr8ng)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_114), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ul9cDZYl7YkH1RhZBTd9c6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_115), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QFf4DPoOk6Jy59cL2OASJzw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_116), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7yDHbEsisDNKcqQHIRgOuQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_117), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GwVmUG4AZCEAP8dBk4TGHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_118), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___q7DaQZqCe0lRO0rhBWzM0w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_119), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hGIvKp3CGssDQ2vSvfksxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_120), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1P82lz6H9anMKDbz1vYNpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_121), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dbg9bsMENUwtF9aO45wEGG3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_122), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ym0Pr6z8A9ajyOAgotpd9a9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_123), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___izqbVTMtpY7kMiTK4bPJ6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_124), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rouofEnBX1ok9aMXmOsKdHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_125), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C3GQZbey70223GyG307UFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_126), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yxmLIVRKySYknm2wSBp9cpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_127), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8u7UPO7ZpaMkWoJRtZLlYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_128), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXT7cKE1NTiL4U2MdlA2yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_129), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___44q9ak51X9b9bmuZ9cK4LsFWOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_130), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___77dMna2dOod5LqwYkRMZGg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_131), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QXMcmOst45ThYFLo9cOKDiQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_132), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zldA3DCxzpAhONjlfz7iIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_133), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dnB3So2xw9c189c09a9cc9b4hxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_134), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___r2gXVULKoAtQjkgjf0Z4wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_135), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VsLzrOz1nS9cRBBz9ccZfETQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_136), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___tRSKshYob5uzZE3eBVe59cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_137), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vcbf2lEZaiSjbAHwgt9aKXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_138), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sb2NV56uvmvOtYkgVsaVQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_139), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7STLi75js8HXlmFg7Abt9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_140), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5O50gePV9adn3wgFGWjlOLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_141), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a3Y7eeGNXkOCLUktwxzN9ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_142), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ng8dczn37bLzoM9bsVdPwjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_143), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___boICAAvO1zkTlYDOuEaj6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_144), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LeuvM3mIc6pSNktpm9cHSVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_145), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mxQQ2vwZhwfDagj5SEXeHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_146), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x2NKZw9brJpylbwEtLfx9a9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_147), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmT2Gs9cB7RN9cmo9c9cBpfKsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_148), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RiPFNabSvay09bAW4Jic2ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_149), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___efSHgbCUYoX1lUK7M9aj4Pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_150), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vmgih7rhd9cXUC9cEBz2cwXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; 
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_151), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rB3209aHcqpT39anNUezpSjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_152), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x85Q1O2QUnYbstPlxUCyAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_153), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L3AeZ1n9aK4C1jsBCeaCmlQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_154), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ebmRHYtM9cCbYF6WvKDfQ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_155), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qE1JtEDDOvP6J49a9cv9aK1Dg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* 
result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_156), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctvQ2lU9b9bnVVpNP4GhIo2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_157), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8bHx2qDxS2yWIId1X52mqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_158), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kTDR7D9c9aomjcaUQOmKJ9csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_159), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1tj59chZC08k4TWYeZiqDnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_160), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___533QKY9a8quvLM1SsLE1JfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, 
NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_161), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uFJUSitn9c1Tw6cF9cZf6x6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_162), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G8iCcDovsaw25PkF7wHs0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_163), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SY4U2QvmoQxocaG8MOmyHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_164), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bhkFYKbURxGcJnKpswdr2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_165), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___lTsL0bi6njxzDh9c8A32r2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_166), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k4VEB3kaBL72FRQN8buzSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_167), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbQIA9cHUESCyYT1WEeIVbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_168), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___66KauNYQRukYNgmb6bVXEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_169), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S550SlHmWbDpD7rs0J2lrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_170), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGnLi1DjaBomQ9c9a6MOCA5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_171), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bEKtSmboScaCP8PPnlOWqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_172), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpwWwpfBXgcQ6xoLOH4CJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_173), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GHW5yjG8N9c2BQBun6aBJzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_174), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yup67SPGRVcwMdmZwc9cSag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_175), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ec65mR1N7BSL9cmUa3z9czvA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_176), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibyK70G44kCK9cN8nAkxyGA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_177), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H9b69aGZGrLOiKWQdd30yQ9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_178), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Te7bvH18PbGe5siNJ9aDTTA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_179), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MUaBvSw0MHw3qQi9bYavAmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_180), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bWYxjLMocXEvYgQQcC63rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_181), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpcNBrQMfioSvQNxKHhu9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_182), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gywCjjjPZobIva6liQWNLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_183), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6PDHoyz05lEjxGNE0k0ikw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_184), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AXGsBlGV5DoEOwPJSl9bdJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_185), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ygzR9aJ6oM1bZTq4Z2lNO3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_186), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uYVc6UX8hcaEdrHosUQAOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_187), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AlV8xJkjCXujAUesHxezgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_188), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L9asecuKwevQN2h9cWzyv6oA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_189), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nZD9cadh12dcqTFsXBHbCRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_190), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dz1JHdrf1p9bPB9ad2dZBtYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_191), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0MUu7DVBoaLHTVUZe9bKoIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_192), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___29aIWEGnJW0wnITIeSKWfFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_193), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n2CigWG38YNInkiL4n8g7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_194), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___bb3v9bDRLv9c9bcQzGH9c5H4Gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_195), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tkJq8W3gQVDjuu9aT3THC6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_196), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oyQkqbRkRzo43y6iRevkaA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_197), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YuphtPwdJHG6BUJOVa9bX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_198), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EQxs5xa4FNWtMfcvmFZ9cMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_199), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5YbjRZxm0g3SrdnL73aQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_200), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MEALpIIbc0cKMcjQ7Xckzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_201), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yUc5o9ax9c9asIVNkfprLRPpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_202), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4JrnABFfF3UTQ3nO9a6mXzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_203), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bkAwkKoaz09cAQo9arQjGA0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_204), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7N9bV9cjVBHs9ciAhz7vgdI9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_205), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QX9cU2fNK0jJrZNDQKnAycA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_206), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vTbVjc6faJqdBrTckFLLWQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_207), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___v4k9cDtOUzGyUHJbnJ7kQKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_208), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ym49cR6ES8k9bYWsnh1fELA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_209), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Jx78R9a9anGvjjocCaP8YgIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_210), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___s0lnM9cZDB9bOREa4Fx1leBw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_211), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aT7p9bNEmP3LxrK3OhspnSw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_212), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mV75vMLuQ8rrQEUzNz6llA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_213), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jhVz7tKuf0heLM2D3nL0gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_214), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c4YKWXetPKpaUUF7Qft2gA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_215), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rCIIoKC0OrXhpuTFTIZn0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_216), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lXaYcLcHHuQ46VvpH6Qr2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_217), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___duX6hgjmpJtFFdvJVuoafg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_218), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GNSb4l0oRsR1gu66azz1LQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_219), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LGbUtKnsZL8FcQiQN7sWEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_220), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Xf9ajw9cRlpuqnFnlEuSpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_221), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nVQhtKHyPC8pvPbUAUBU7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_222), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bI5GhokFUA9bgO9av819cgdBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_223), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___qTicKO8EMC9cWGOyybIz4WQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_224), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yZHx0qMqBvbhmZ0fMuAP6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_225), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YQzyPnY5vKAqE2RyLX0cew)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_226), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cIILAsA6BeRrvHfloZIscg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_227), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IwDTuHqkGn7wW16ga2ktSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_228), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lbkoHJP5AIgE86vP7MmlKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_229), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9b84wNYrm79cLYfx9bsPNHjPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_230), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___K5ihI3kW9cFBh6sKlfEpJwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_231), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEiBK88oEGnvYfkiei9cyJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_232), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Psy1qActyEYmIhrRo2KkJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_233), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cZzkwYphs086zWiuLotXLA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_234), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kPsYd8d9cco3hhqO7CEAFeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_235), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BbOsdTh4ZRNKmiISHDyg3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_236), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Py40oiVtYdIelNuiQQjpjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_237), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QzVlk7tEXgagMWC19aLvbkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_238), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qxufH5vUl9aY2l9cFq39bnVwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_239), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jiTCvQQpgMU0bTrdVuECiw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_240), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n4OrLXC1r9a83k5wz2NoWxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_241), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bJpxHYPJaxWBQn6QxwBA4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_242), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fOn9b5Ij3ytw2Ui9a2CPI5zw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_243), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zJU3FoYOdJ9bmuODPmqtgdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_244), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1MXpJAdeOMc2XMg5H7t9aSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_245), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VNAv31sqVgxrd9aXeFF5wYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_246), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MULS9c8dKz2mJ1U9a9cMyTCYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_247), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5TB09c2Iz60T0YagbSbI5RQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_248), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NIzUqj4Mr1E3EKy0AkJaXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_249), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yQdCkIARIVr9aqI8oVxi9cQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_250), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WYvjnWcyRjjjI0lasIi1YA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_251), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hR4oq6WdDjEl0JIvQtvUlg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_252), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___84GQPNcrIJtbrzuA7JnMPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_253), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SqZEI7bxySjmJX4GsXyvKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_254), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c1f569aWpTd825BTnv9bq4Xg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_255), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibl3qMPOrpGT2x8X7vmbeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_256), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bBcuDHMXr6Kz1tr7BzD9aKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_257), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aDvifvZOUmduC6Unfm69bKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_258), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5kuxCbMO8PVJc9aJbXScUOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_259), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uu9cBz7dxPVDFhF9aLzWecyQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_260), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WWt3il4CHPiYP10KdNLrWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_261), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hc7hMh137dtaNdd3qw28EQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_262), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XWz49cQA2QiZaLkqHBU5L3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_263), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Au81R9a68Rv3gwlPtvDarPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_264), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yw741acxvsUs9cOX9cuiDj9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_265), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9caGByKkBhaXSZ6fCJLIdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_266), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JmTWN8YiVKTZuvCYW2XNZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_267), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Kbv8OIo8zpawh7SNMbfgkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_268), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___B0OBOTOJQENvDd71LJ9b19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_269), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___22ELRKd9bDuNug6qvIihS3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_270), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ddrHnMlEhcHznkXv27msmQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_271), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yhJ9aDxHfJqHvWO0i6N9bukQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_272), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MLJpsW0DAZYB8lAgq09cUjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_273), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8tWfSjtTOlDafxpQPvChAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_274), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xKLwwPkFSVy2Dtn9cuJ78xw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_275), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hdRijZdoPR3UGq9aUw2zFDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_276), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZjQc8bFVF8ePFYxjN0iVVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_277), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SiqB8gWmdYKb4vtgqYrrMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_278), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Ixv9aZ9bvpNaVAVzYBJlUPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_279), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HoXSbgR7plMG7Fef0fcy9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_280), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H1Ma2EXqegHnMqzJZ4SA1g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_281), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___jpXTCDNVjIi5r4hbHN5SVQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_282), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4L62Yp9bLO2ZDcvBG9bSvP9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_283), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MCSdS9cTdQvttqiM9azLzkDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_284), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9bSTz8DQ4tgiLV9avQjFgFA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_285), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3CQpPXVDiNqC3jKO8Juliw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_286), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___w50CkyHBltcyR8rWxttZCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_287), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fmEfDTfNDkVDxWi9c0O6D2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_288), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k9bgPIs43oLgxnk1l4TNQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_289), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5MqeIopvDuA9aozxL79cQ88g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_290), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Zp9bMZDO5tEkvVLTxiKsBkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_291), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j5FZyaqnqjc2dcsUkAp28Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_292), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EbvvG9awBeRKzx8xuBIb7TA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_293), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a8besSQa09cOOt9b9cgdVwY9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_294), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oVKF7oq59cRGAaMpvWzNWbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_295), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7ru3bwKuSx4Sc8ilsBmX3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_296), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MDIdJXTVckPj57aO7LMVgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_297), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vQDE0VOBftnrpkVsM9cme4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_298), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bmR9bM9b0qqEqU0QJKnmLQnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_299), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88tWbH31SmOWJjgJ7RnfHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_300), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t1CB59bEwlxfHZhNwNNz1bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_301), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbLM7ZajsWOFLl4iSo0Krg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_302), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rH7Ns9bqAnnfkukwBIlz9bKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_303), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zx9ctq3Ffe9aysjoWhZOzevQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_304), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9a21DAzFCa3OqRooKKtkqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_305), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y4DThr9bpMbmoKpvgT1rYwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_306), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___811qrD9bMr21weOkImaKvIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_307), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YNifhKTQWQRf1atK7E3Qmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_308), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YfbBxPLyPvVS6F2y9bSUFIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_309), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OBvl4G6evYkvK9b9bClFGqNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_310), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___pHsLkkx9bTDctZjmJqwCYRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_311), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ksH6NowTz9bh4eMOdyaiR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_312), args, argsLen_0); return result; } static N_INLINE(void, nimSetMem__JE6t4x7Z3v2iVz27Nx0MRAmemory)(void* a, int v, NI size) { void* T1_; T1_ = (void*)0; T1_ = memset(a, v, ((size_t) (size))); } static N_INLINE(void, nimZeroMem)(void* p, NI size) { nimSetMem__JE6t4x7Z3v2iVz27Nx0MRAmemory(p, ((int) 0), size); } static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s) { NCSTRING result; result = (NCSTRING)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = (s == NIM_NIL); if (T3_) goto LA4_; T3_ = ((*s).Sup.len == ((NI) 0)); LA4_: ; if (!T3_) goto LA5_; result = ""; } goto LA1_; LA5_: ; { result = ((NCSTRING) ((*s).data)); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f) { NIM_BOOL result; tyArray__9bKy7UA2LOi2vzOViufaW1Q buf; NI bpos; NI blen; NI btotal; NI rtotal; NIM_BOOL T27_; NI T28_; { result = (NIM_BOOL)0; nimZeroMem((void*)buf, sizeof(tyArray__9bKy7UA2LOi2vzOViufaW1Q)); bpos = ((NI) 1024); blen = ((NI) 1024); btotal = ((NI) 0); rtotal = ((NI) 0); { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 
1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; NI spos; NI slen; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; spos = ((NI) 0); slen = (s ? s->Sup.len : 0); rtotal += slen; { while (1) { NI n; if (!(spos < slen)) goto LA13; { if (!(bpos == blen)) goto LA16_; bpos = ((NI) 0); blen = readBuffer__f3MIZh4IV2qRTxlOpckbRA_2(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1024)); btotal += blen; { if (!(blen == ((NI) 0))) goto LA20_; result = NIM_FALSE; goto BeforeRet_; } LA20_: ; } LA16_: ; n = (((NI)(blen - bpos) <= (NI)(slen - spos)) ? (NI)(blen - bpos) : (NI)(slen - spos)); { NIM_BOOL T24_; T24_ = (NIM_BOOL)0; T24_ = equalMem__Bj9c9cm9cBM9coLsuNsT5BvZ9awsystem(((void*) ((&buf[(bpos)- 0]))), ((void*) ((NI)(((NI) (nimToCStringConv(s))) + spos))), ((NI) (n))); if (!!(T24_)) goto LA25_; result = NIM_FALSE; goto BeforeRet_; } LA25_: ; spos += n; bpos += n; } LA13: ; } } LA7: ; } } LA4_: ; } T27_ = (NIM_BOOL)0; T28_ = (NI)0; T28_ = readBuffer__f3MIZh4IV2qRTxlOpckbRA_2(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1)); T27_ = (T28_ == ((NI) 0)); if (!(T27_)) goto LA29_; T27_ = (btotal == rtotal); LA29_: ; result = T27_; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; result = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1)); { if (!result) goto LA3_; result = equalsFile__9bihNFg7Qajcg9arfx5cr9aHA(r, f); 
close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); } LA3_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 1), ((NI) -1)); if (!T3_) goto LA4_; { if (!!((head == NIM_NIL))) goto LA8_; writeRope__FwuzOBq6SLlanVUstm8q9cA(f, head); } LA8_: ; close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); result = NIM_TRUE; } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T3CpMgcFHzYracJ80CUZBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_313), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6wQcdZnh9aH29ay5rwY6M5fA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_314), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y39ant8iE9bjKB0kbkRCAibQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_315), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___RKXvZR1cmZW5dfjtFQCG3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_316), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEA33x9cMfuJw3ZiGbn25iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_317), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xK6HolrLvVFWil73hZYbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_318), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z2c9cvs0wVVVqTEZ3Qwe9bfw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_319), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AxDJCYpgPoquRsZtiOnpRw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_320), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dU9cenGIcVUltUO1088LhYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_321), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TLpRy9aDJ1Ni4vccOIoiMbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_322), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RzB0z3UV9bb4kXUEGyS9crRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_323), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z1QwTAihBHnxe59cytXnhmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_324), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XZnCV59at0sqX6ShEjlFLgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_325), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YLzwVVtf4fuPYZVeMQOa0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_326), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CtS8L8cOLTsSuQ10mtHsvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) NIM_NIL), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mPpmmd13MIZLTbd1oOdSkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_327), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Th3qC4WgcAhWPSlLw7vZ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_328), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RPy0XXevrEBMts1Mb9arGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_329), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gqwqalZtiJtCgAF9bY5S6qQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_330), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9bYX9bu7ufcttiARCDUJ0qg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_331), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W0CV9bE9bNiLgazfFZjoQCBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_332), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ecC7jlB6gBWrt0K9byHohPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_333), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hFzCKQOJ8Eao2AJk5HOvxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
/* NOTE(review): This is Nim-compiler-generated C (nimcache output), not a
 * hand-written translation unit. Do not edit by hand; regenerate from the
 * Nim sources instead.
 *
 * Every function below is an instance of the same mechanically emitted
 * pattern: a `%` (percent) string-formatting wrapper specialized for one
 * static format string. Each wrapper:
 *   - takes an open array of rope arguments (`args` pointer plus the
 *     compiler-passed length `argsLen_0`, type NI = Nim int);
 *   - zero-initializes `result` (Nim's implicit result variable);
 *   - delegates to the shared helper
 *     runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(fmt, args, argsLen_0),
 *     where `fmt` is a distinct module-level NimStringDesc constant
 *     (TM__Vw9cfUOQOae9b9bzZBlucMZQg_NNN, defined elsewhere in this file);
 *   - returns the resulting rope (tyObject_RopeObj__* pointer).
 *
 * The mangled suffixes (percent___<hash>) distinguish the many monomorphized
 * call sites; only the TM__..._NNN constant differs between functions.
 * The first statement below is the tail of a wrapper whose header appears
 * earlier in the file. */
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_334), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___62079cK9bsws1aAJqEmAGo6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_335), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hO1UTpWJhaojnhUyqfmgPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_336), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___wlKCT75QSpBNooI9a2xvWeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uD0SC9bUeWpB9cK7V1aBT9aNQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_337), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uez7zQbKzeDFToq2Yh43bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_338), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JbygmsEkVsyK85BPVFvwbg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_339), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FLXrAGf7HFTHIGh8Xuickg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_340), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hmfCuT8fgBmRlPR25L7ZOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_341), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HUHatwko3S0fuszXQAOSQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_342), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___gGKEcvCOVzpTQoSXzO01Dw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_343), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LMnNsJkYlruXHnF5LV9c3pA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_344), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uJ11bTQ8dBBAX88A2cyICw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_345), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2D3IUNoEAKKLxuRqVNosPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_346), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___o7SGM9buciKf5BOjTvMKA7w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_347), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ht0mWR3LosfEZ8SopJcmEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_348), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GweM9byC8cQI9cehUzlYVs5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_349), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xnze9a4kYSwHurdPnhyNGzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_350), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGaOrvR5YSM9cGUajaqcNOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_351), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GF60428RM29aXV0LYutm9aOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_352), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ciTj4q9cGhcXiXY9bPemZVvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_353), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HLoe040Vi0LPzmTid9aLGdw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_354), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tnP9cO5PduJRSEeqtm9bocEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_355), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6XcU2shl8EfYxL7utXbwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_356), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3GvB8fuMNh8BXF8IoORCxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_357), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RhAtD9c9aECDorIc8rDhMF9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_358), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CSdlEV0i9aXEHNuC1G9aIEbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_359), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4SLS9cx2c8VCFIilepFlOeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_360), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amX0pef5rA4JAmWZ6ZB2Nw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_361), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xAta147ahLKNrJMPPP5B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_362), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sshAiIx49ba6saVSAWuyFuA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_363), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmulmJw2SZspd0rz2PYvQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_364), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UFeu00R8dNoyzL8vy54mnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_365), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qYiwFpynEwFeSf3Aa2sS0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_366), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6xseTZmgyslBQb6RMm9b4wA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_367), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KsZXXO4zKP47iruPcSEryQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_368), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TUxzei0sBfo3GESRTg1T5w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_369), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ikDBM4Dyw9c2kuwAAswRyOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_370), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ht9cduX4yJQKi2Gi685ag5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_371), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Wsnl5zC9cCEBdwJcHgpLf0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_372), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___deWmrKhbFG0MxH9cDr9cnhfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_373), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HiCTlq0dXhMZvpDtUGWGQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_374), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aagcnoz4kFWlzsoVgR9b0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_375), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oYhFcOWR4tEylepRJJLrlA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_376), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RBmOS8xzFTxpuGVryQycg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_377), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___apXghcMDCUp9col7jN5spHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_378), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cNvJ1SVovK9b29bKmwKyiijw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_379), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0mbMVYCe5Qwl9aQOKV3sh3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_380), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___03lrwELd9clj29bFkdXAVxkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_381), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8croAZ6oMdSPXHbIisuppw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_382), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TDLJ9ciKDBoW4ouZs855Csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_383), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Mk2KRdMWX4H3L9aBEG2elgQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_384), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___pFXgvxsz2L5f27ImZwJwzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_385), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n9aTlv49bCxoRKQNZiWsaW2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_386), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y3oNivo8px1XzxmB9b2OY5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_387), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Pnqkcr360suaX84kwXMuCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_388), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FA4ohw0aOufzzLhmw9aUAhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_389), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SWZi8EY4Pz39bBPSp9cbtZMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_390), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XaBXRInsoVU7DBc2WK8dzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_391), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NdMO5d09brFwLfDc8ciTSqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_392), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E62TlyqwqpEwqcA0YTjttw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_393), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___m4T7v0qnGpOgwmMenKcgwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_394), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SKTmZPSgcdPr3Du3ia9b9czg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_395), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ItxAXpnPzfUbYRPsHgKrPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_396), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ggqZXIgPaS71ubw22cYODw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_397), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LLnl4aDVJynim7LQvfJKLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_398), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ob6yLhv7QvbU9bdZj8Nw2kA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_399), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qfsHROU9aHSaYGq3tpw1XDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_400), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___j9bcJJvtd9bur0VZUQL3ibgA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_401), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___32ITt7hKDrhn9bXvKbmnE9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_402), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZAOkVi5SmgPcGpCSuSRXVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_403), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___smDIOmjGgf8ZP9bfDyv43bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_404), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1jtIbjhXi2wH1iWPyC9bgAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_405), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NPgb4kECDcV8MICSil6Rjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_406), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cQHGAtgSLYV7mm9bnVGYGRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_407), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q4LBu2cVl8IcNTrtxd6B6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_408), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M36w8F9bFwighD3K39bvtVWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_409), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wm11wQtuJBQgTy9a39apz0eA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_410), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0bUw514mSumiNnSjkD0bqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_411), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6hxDi5nlebu1DFLqpYq5lw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_412), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GkWgkK8SyjrFfWjGRwKWrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_413), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oubCLvBtU9aRB9bhG2vbCDeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_414), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KTcAQx04UE87HYZ48ZBm2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_415), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y6zpqvbZwK8tJZiKs9agbGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_416), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2OGTIxEeE0xFVRpz5TxKyg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_417), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xZtTB2eXM1dRd9aneL5VPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_418), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amO46kEKgIeOmW50ayV6nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_419), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lABfXU9aXZsyfylYizY8KA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_420), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5JCQx3oDHEcLdsEz6Rx0Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_421), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dTtf7fil83VcW2Mkkr7scw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_422), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88NG6Rr5xfTcA6hqLfZ2iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_423), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1DWSTPxvqlc4A2xRDmjZDw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_424), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y5Z6ewsHLxj9ctzxTLPCLmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_425), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CHBd5pGE9c8nq4KNqM8K48g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_426), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y2h2X887dhz5sEoD4C8ezQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_427), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dQfg2HrsVY6E7P22Nis1MA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_428), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0b2Bm7vpM8YAMKp9cuAwg3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_429), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___1Hh3EN9c4pkzdKB09bo9c9aTBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_430), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AOSgPOjXfsLWEICRXv3U2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_431), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gN4yb6p4ql6iVJOPAjLEJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_432), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WIg2bxfQLkmzIdOv1JkRqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_433), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Klw9agVDELeF44OQ6PnRiA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_434), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LL6jCaqBGLwC1sCgmCAEhQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_435), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S9b9bs03lj0NJlhXUmrylsnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_436), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fphSfWWyYSWLARtGIpYB9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_437), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___As9aDT7fkqstj16MQnIGPhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_438), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eAZ21NmzzIsugeSSkcxIkQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_439), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___D2dSwFjTnRSmeKOoMm6w0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_440), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HlU9bV2X0HOPcGJnQlGm9c9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_441), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p2lIQAdDBUpuVZML6ecUOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_442), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5hzyGWCNjqgqPj0O7sSnkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_443), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l1wvVBeU1Nnie8cWddgPCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_444), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yVZN2jQzbJwg3E9cehLff9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_445), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9br9b8BVYaWzg6CXcn9c6EXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_446), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qPugJ1Nc2L1EdGwEF0AJ0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_447), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HzZyrXo2QFynm1T8X76cCw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_448), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___O2nyVw4tGD6MMc6u7I9bH9cA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_449), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IQYZUimFiAV9axFM9c64hKjA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_450), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RCJU8UTq9cE0Jsi59anAbTIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_451), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6vmSaSCgC4V2L5H7OWeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_452), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Eqr9cgWCkrZrUG3sg0CawIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_453), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1lq60gbfPY9cyjQN4YouTQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_454), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1nMXoOe6cENU7004pnh6wQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_455), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ALynLzo8zWvno8ZxASdm4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_456), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tlkWMVJPsx9aWUbp8FMjQ4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_457), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xPW5KjObCPL2lJmHFoqfjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_458), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___mTh2rYVPWUnI8B7kU3NWUg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_459), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aoMj8hrcFi4HlPDZ9a9alpig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_460), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yj64cHk9ajrzJI39bfpBfOVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_461), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bY6R9buTsrqJYQAuD39cegOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_462), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___U9b6hkqS6N7XIWr0gy8z9bug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_463), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MwhwhkHOiavfXQl9aey8nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_464), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JRV6DlpqdegYGLcFjNPv0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_465), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ryMkoQkM4zAjyp0800DrDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_466), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___iW9bjdQoXkul7L0e76qo8XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_467), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___i3z9am8Hzy69bSo575pRdzGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_468), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SkAQPSnCyiRvin57XULW4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_469), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Bym8FwH29aQE8fth9ar38yJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_470), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CbbQqCp6itJgwKVRfTr69ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_471), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HWgoOloM1oqcI9aZ9bEkoBhg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_472), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Anf1UHjOzz9aHgMOgtnEPZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_473), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tDrtnFWakp63hyE9cfImgZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_474), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JwpI2xnYNfR68HstfDi1yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_475), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___23SvbIxPpf5MIOga79arr6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_476), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uVZXJGmbOGIG9bfkI4ZDwJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_477), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UxL9a0Hh7Km0Z0DIk7hp9cBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_478), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QxiH9aM0po7vA19b2s1CjdEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_479), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FZt89ajG3TKAhfL9aW4s7hcA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_480), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5GaE39bOOeQZy3EFOEIy5QA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_481), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SA9cvbR3uc9cP50nnaEBJctw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_482), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KweYGQ9bFYg76nmoxpk8ksA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_483), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AhY63HjLy2bPe9bslUNBuBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_484), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3m7YwdrxIvOkmvfnm5JYSA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_485), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TEWiK8QWtRTCIQ9av7sW8LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_486), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9an6bUHwpxqyL2kgNHX3MEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_487), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___kLwAORKb0c4oFgFTN9aEN8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_488), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Qm29ctdy9c4sqKctTsqiBWIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_489), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UyNt2Asj9aa2ScoGVo9cCnNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_490), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXvQyblNYV215UGR9cTka7Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_491), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LYjQOKn1i9ccw8AFlvPGkCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_492), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___THj0xNXkqJf6reD7exsGbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_493), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3oFXAbir9c7XcKzu9bpgAM9bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_494), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4sbi76q7ZLqpKbD3pwJ59bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_495), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q9cOQGrP4lOdbYHXMQ1yZtg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_496), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0AX4Q6cA8nOXUagvzFqt0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_497), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qQ3g8SwjZoIFAay85NaiEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_498), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M0TByFCTj9bbOkDSRpFz3LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_499), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OikfyLf8HmjI9auYLFoaVqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_500), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3KVF9aLACI1h11BqZrkzjNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_501), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ial810twbEzfkHaHMFYNCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_502), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z7wCJf0WipOQOQ4ZZNBIEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_503), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xpm9cGf2grEXdjAQV9arqWBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_504), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sqxyWwlLrfrdyc9b3BINcXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_505), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ztLQ2Orupb9b9b3KrCvoK9cbQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_506), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PI6febxsdTbySkLsIEqHKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_507), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGRyuC9caCxfdM1i8W4fjgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_508), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vWWA89aSvs5QwAFN4Jdr2IA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_509), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRopeIfNotEqual__Wiam9c8x73Mtmbj0r4Ppikg_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; result = (NIM_BOOL)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = equalsFile__Wiam9c8x73Mtmbj0r4Ppikg(r, filename); if (!!(T3_)) goto LA4_; result = writeRope__LLRRC42xWBSkxzV9bsPu7lA(r, filename); } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQuk161wRVxbYxfH80Iwcw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_510), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQrwMIIitnm9cEflSXdCkPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** 
args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_511), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___A9aKFJUF6ZjJQfrcPHJigOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_512), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8ehuHmXS8omgqFrdYMsPBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_513), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Opo6JkHmCRmDA87qcGfvg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_514), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C7jQ1fH79bR8HRQrbJjFKDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_515), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___2eu2gmgXiDUZkBgTVqD7pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_516), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cCI1wZSoDB14achJW7ZFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_517), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dkLAWa1dMAcGEAyfUZ59bRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_518), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DuvwOyJJ9b2gpVM9cV7DCFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_519), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4MBgNtJLOyqbjfGytl2OTw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_520), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___336bx9aXX7GZckfWQE5Jy3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_521), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IbsmsXdtDOH7pLpzh9cmAOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_522), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cGelOO9b6sliTnobJf6XAsg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_523), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aNorSJCSJyyDo7w0s6eynA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_524), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYRFs7dwiqyMIzbsx9cDq8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_525), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TavFv5xK0dxxJCk9b4v34zg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_526), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aAWQyBOqadJYgBT29bzliAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_527), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zpFS2Xy9cmoAoqCFSUQj1gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_528), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Nz9cwOtMmcX2gklRogKhyEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_529), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGYo0XYmypYw3N26AYh7ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_530), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Z4ajz6IErIB0a6mpq4Wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_531), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eqn09cqDPu9csxGUOSa2untg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_532), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rZ5o6ziDKz4d3bfaN54Dgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_533), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGa4o1aenD9cjoU03CAgtqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_534), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___b2PLtFwpZkVmYhHWvW4i1Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_535), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctY4Nx9aQFC9bl9c2wbRLoFYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_536), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xsFAphqq4CRpmuZ79bXVLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_537), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SSpcZv60d0mAp5H4Mb5hpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_538), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TtzOadDB4I9a89cWej19a2PNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_539), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KKiSvh9a121M0uSQjcJhhMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_540), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesInit000)(void) { { nimRegisterGlobalMarker(TM__Vw9cfUOQOae9b9bzZBlucMZQg_3); gCacheTries__5GfZTThHPBfB9bjRZdFluBw = ((NI) 0); gCacheMisses__fLRm9am8S0daYBVNK6JKyBg = ((NI) 0); gCacheIntTries__opyfsNv023Md1P05mqsDew = ((NI) 0); } } N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesDatInit000)(void) { static TNimNode* TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[4]; static TNimNode TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[5]; NTI__OFzf0kSiPTcNreUIeJgWVA_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA); NTI__OFzf0kSiPTcNreUIeJgWVA_.kind = 17; NTI__OFzf0kSiPTcNreUIeJgWVA_.base = (&NTI__ytyiCJqK439aF9cIibuRVpAg_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, left); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].name = "left"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[1] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, right); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].name = "right"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[2] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, L); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].typ = (&NTI__rR5Bzr1D5krxoo1NcNyeMA_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].name = "L"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[3] = 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, data); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].typ = (&NTI__77mFvmsOLKik79ci2hXkHEg_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].name = "data"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].len = 4; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].kind = 2; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].sons = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0]; NTI__OFzf0kSiPTcNreUIeJgWVA_.node = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0]; NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*); NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.kind = 22; NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.base = (&NTI__OFzf0kSiPTcNreUIeJgWVA_); NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.marker = Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ; NTI__USLYl0Lpkimm4FABiJ3ldA_.size = sizeof(tyArray__USLYl0Lpkimm4FABiJ3ldA); NTI__USLYl0Lpkimm4FABiJ3ldA_.kind = 16; NTI__USLYl0Lpkimm4FABiJ3ldA_.base = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); }
/* morphology.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/color-private.h" #include "magick/channel.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/prepress.h" #include "magick/quantize.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" /* Other global definitions used by module. 
*/

/*
  Update 'assign' in place with the smaller / larger of its current value and
  'value'.  Both macro arguments may be evaluated more than once, so use
  side-effect-free expressions only.
*/
#define Minimize(assign,value)  assign=MagickMin(assign,value)
#define Maximize(assign,value)  assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel.
   NOTE(review): silently wraps for large n (size_t overflow); assumed
   acceptable because binomial kernels are small -- confirm at call sites. */
#if 1
static inline size_t fact(size_t n)
{
  size_t l,f;
  /* empty-bodied loop: the product is accumulated in the update clause */
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives (dead: '#if 1' above is taken) */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif

/* Currently these are only internal to this module.
   Forward declarations for helpers defined later in this file. */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list.
   Walks the singly-linked 'next' chain; 'kernel' must be non-NULL. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin. This in turn allows the use of
%  highly asymmetrical kernels.
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array.  This allows you to shape the kernel within its
%  rectangular area.  That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using the DestroyKernelInfo method
%  when you are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can be optionally be defined at +X+Y (such that +0+0
%         is top left corner). If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators A list is defined as a semi-colon separated list kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels.  A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MaxTextExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) 
return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetNextToken(p,&p,MaxTextExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string) { char token[MaxTextExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetNextToken(kernel_string,&p,MaxTextExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
        args.sigma *= QuantumRange/100.0; /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}

/*
  AcquireKernelInfo() parses a kernel definition string (or, with a leading
  '@', the contents of the named file) into a linked list of KernelInfo
  structures.  Multiple kernel definitions may be given, separated by ';'.
  A token starting with a letter selects a named built-in kernel (via
  ParseKernelName); anything else is parsed as a user-defined kernel array
  (via ParseKernelArray).  Returns NULL on any parse or allocation failure;
  the caller owns the returned list and must destroy it.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MaxTextExtent];

  const char
    *p;

  /* NOTE(review): a NULL kernel_string is simply forwarded to
     ParseKernelArray() -- confirm ParseKernelArray() tolerates a NULL
     argument rather than dereferencing it. */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' form: slurp the whole file into memory and parse that
         instead of the literal argument string */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token),
         *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);

        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* release any kernels already collected before bailing out */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }

        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }

    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     A c q u i r e K e r n e l B u i l t I n                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelBuiltIn() returns one of the 'named' built-in types of
%  kernels used for special purposes such as gaussian blurring, skeleton
%  pruning, and edge distance determination.
%
%  They take a KernelType, and a set of geometry style arguments, which were
%  typically decoded from a user supplied string, or from a more complex
%  Morphology Method that was requested.
%
%  The format of the AcquireKernelBuiltIn method is:
%
%      KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
%           const GeometryInfo args)
%
%  A description of each parameter follows:
%
%    o type: the pre-defined type of kernel wanted
%
%    o args: arguments defining or modifying the kernel
%
%  Convolution Kernels
%
%    Unity
%       A No-Op or Scaling single element kernel.
%
%    Gaussian:{radius},{sigma}
%       Generate a two-dimensional gaussian kernel, as used by -gaussian.
%       The sigma for the curve is required.  The resulting kernel is
%       normalized,
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       NOTE: that the 'radius' is optional, but if provided can limit (clip)
%       the final size of the resulting kernel to a square 2*radius+1 in
%       size.  The radius should be at least 2 times that of the sigma
%       value, or severe clipping and aliasing may result.
If not given or set to 0 the
%       radius will be determined so as to produce the best minimal error
%       result, which is usually much larger than is normally needed.
%
%    LoG:{radius},{sigma}
%       "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
%       The supposed ideal edge detection, zero-summing kernel.
%
%       An alternative to this kernel is to use a "DoG" with a sigma ratio of
%       approx 1.6 (according to wikipedia).
%
%    DoG:{radius},{sigma1},{sigma2}
%       "Difference of Gaussians" Kernel.
%       As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
%       from the gaussian produced by 'sigma1'.  Typically sigma2 > sigma1.
%       The result is a zero-summing kernel.
%
%    Blur:{radius},{sigma}[,{angle}]
%       Generates a 1 dimensional or linear gaussian blur, at the angle given
%       (currently restricted to orthogonal angles).  If a 'radius' is given
%       the kernel is clipped to a width of 2*radius+1.  Kernel can be
%       rotated by a 90 degree angle.
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       Note that two convolutions with two "Blur" kernels perpendicular to
%       each other, is equivalent to a far larger "Gaussian" kernel with the
%       same sigma value.  However it is much faster to apply.  This is how
%       the "-blur" operator actually works.
%
%    Comet:{width},{sigma},{angle}
%       Blur in one direction only, much like how a bright object leaves
%       a comet like trail.  The Kernel is actually half a gaussian curve,
%       Adding two such blurs in opposite directions produces a Blur Kernel.
%       Angle can be rotated in multiples of 90 degrees.
%
%       Note that the first argument is the width of the kernel and not the
%       radius of the kernel.
%
%    Binomial:[{radius}]
%       Generate a discrete kernel using a 2 dimensional Pascal's Triangle
%       of values.  Used for special forms of image filters.
%
%    # Still to be implemented...
%    #
%    # Filter2D
%    # Filter1D
%    #    Set kernel values using a resize filter, and given scale (sigma)
%    #    Cylindrical or Linear.   Is this possible with an image?
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0, 2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
%
%    WARNING: The above was laid out as per
%        http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
%    But rotated 90 degrees so direction is from left rather than the top.
%    I have yet to find any secondary confirmation of the above.  The only
%    other source found was actual source code at
%        http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
%    Neither paper defines the kernels in a way that looks logical or
%    correct when taken as a whole.
%
%  Boolean Kernels
%
%    Diamond:[{radius}[,{scale}]]
%       Generate a diamond shaped kernel with given radius to the points.
%       Kernel size will again be radius*2+1 square and defaults to radius 1,
%       generating a 3x3 kernel that is slightly larger than a square.
%
%    Square:[{radius}[,{scale}]]
%       Generate a square shaped kernel of size radius*2+1, and defaulting
%       to a 3x3 (radius 1).
%
%    Octagon:[{radius}[,{scale}]]
%       Generate octagonal shaped kernel of given radius and constant scale.
%       Default radius is 3 producing a 7x7 kernel.  A radius of 1 will
%       result in a "Diamond" kernel.
%
%    Disk:[{radius}[,{scale}]]
%       Generate a binary disk, thresholded at the radius given; the radius
%       may be a floating-point value.  Final Kernel size is
%       floor(radius)*2+1 square.  A radius of 5.3 is the default.
%
%       NOTE: that low radii Disk kernels produce the same results as
%       many of the previously defined kernels, but differ greatly at larger
%       radii.  Here is a table of equivalences...
%          "Disk:1"    => "Diamond", "Octagon:1", or "Cross:1"
%          "Disk:1.5"  => "Square"
%          "Disk:2"    => "Diamond:2"
%          "Disk:2.5"  => "Octagon"
%          "Disk:2.9"  => "Square:2"
%          "Disk:3.5"  => "Octagon:3"
%          "Disk:4.5"  => "Octagon:4"
%          "Disk:5.4"  => "Octagon:5"
%          "Disk:6.4"  => "Octagon:6"
%       All other Disk shapes are unique to this kernel, but because a "Disk"
%       is more circular when using a larger radius, using a larger radius is
%       preferred over iterating the morphological operation.
%
%    Rectangle:{geometry}
%       Simply generate a rectangle of 1's with the size given.
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
%    Edges
%       Find flat orthogonal edges of a binary shape
%    Corners
%       Find 90 degree corners of a binary shape
%    Diagonals:type
%       A special kernel to thin the 'outside' of diagonals
%    LineEnds:type
%       Find end points of lines (for pruning a skeleton)
%       Two types of line ends (default to both) can be searched for
%         Type 0: All line ends
%         Type 1: single kernel for 4-connected line ends
%         Type 2: single kernel for simple line ends
%    LineJunctions
%       Find three line junctions (within a skeleton)
%         Type 0: all line junctions
%         Type 1: Y Junction kernel
%         Type 2: Diagonal T Junction kernel
%         Type 3: Orthogonal T Junction kernel
%         Type 4: Diagonal X Junction kernel
%         Type 5: Orthogonal + Junction kernel
%    Ridges:type
%       Find single pixel ridges or thin lines
%         Type 1: Find single pixel thick lines and ridges
%         Type 2: Find two pixel thick lines and ridges
%    ConvexHull
%       Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
%    Skeleton:type
%       Traditional skeleton generating kernels.
%         Type 1: Traditional Skeleton kernel (4 connected skeleton)
%         Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
%         Type 3: Thinning skeleton based on a research paper by
%                 Dan S. Bloomberg (Default Type)
%    ThinSE:type
%       A huge variety of Thinning Kernels designed to preserve connectivity.
%       Many other kernel sets use these kernels as source definitions.
%       Type numbers are 41-49, 81-89, 481, and 482 which are based on
%       the super and sub notations used in the source research paper.
%
%  Distance Measuring Kernels
%
%    Different types of distance measuring methods, which are used with the
%    'Distance' morphology method for generating a gradient based on
%    distance from an edge of a binary shape, though there is a technique
%    for handling an anti-aliased shape.
%
%    See the 'Distance' Morphological Method, for information of how it is
%    applied.
%
%    Chebyshev:[{radius}][x{scale}[%!]]
%       Chebyshev Distance (also known as Tchebychev or Chessboard distance)
%       is a value of one to any neighbour, orthogonal or diagonal.  One way
%       of thinking of it is the number of squares a 'King' or 'Queen' in
%       chess needs to traverse to reach any other position on a chess
%       board.  It results in a 'square' like distance function, but one
%       where diagonals are given a value that is closer than expected.
%
%    Manhattan:[{radius}][x{scale}[%!]]
%       Manhattan Distance (also known as Rectilinear, City Block, or the
%       Taxi Cab distance metric); it is the distance needed when you can
%       only travel in horizontal or vertical directions.  It is the
%       distance a 'Rook' in chess would have to travel, and results in
%       diamond like distances, where diagonals are further than expected.
%
%    Octagonal:[{radius}][x{scale}[%!]]
%       An interleaving of Manhattan and Chebyshev metrics producing an
%       increasing octagonally shaped distance.  Distances match those of
%       the "Octagon" shaped kernel of the same radius.  The minimum radius
%       and default is 2, producing a 5x5 kernel.
%
%    Euclidean:[{radius}][x{scale}[%!]]
%       Euclidean distance is the 'direct' or 'as the crow flies' distance.
%       However by default the kernel size only has a radius of 1, which
%       limits the distance to 'Knight' like moves, with only orthogonal and
%       diagonal measurements being correct.  As such for the default kernel
%       you will get an octagonal like distance function.
%
%       However using a larger radius such as "Euclidean:4" you will get a
%       much smoother distance gradient from the edge of the shape.
%       Especially if the image is pre-processed to include any
%       anti-aliasing pixels.  Of course a larger kernel is slower to use,
%       and not always needed.
%
%    The first three Distance Measuring Kernels will only generate distances
%    of exact multiples of {scale} in binary images.  As such you can use a
%    scale of 1 without losing any information.
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; 
kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1, sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (< 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { (void) 
memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( 
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? 
*/ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +MagickSQ2; kernel->values[5] = kernel->values[7]= -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19"); if (kernel == (KernelInfo *) NULL) return(kernel); break; case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +MagickSQ2; kernel->values[7] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +MagickSQ2; kernel->values[8] = -MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -MagickSQ2; kernel->values[6] = +MagickSQ2; 
CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else 
kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] 
= nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>")); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; 
case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>")); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet rotate a non-square kernel */ /* But then we can't flip a 
non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ 
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;"); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo( "ThinSE:41; ThinSE:42; ThinSE:43"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. 
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } 
if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 
= MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
% The format of the CloneKernelInfo method is:
%
%     KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* FIX: the structure copy above duplicated the source's 'next' pointer.
     If the value allocation below fails, DestroyKernelInfo(new_kernel) would
     recurse down that shared pointer and destroy the ORIGINAL kernel list
     (double free / dangling list).  Detach the clone before any fallible
     step; the list is cloned explicitly at the end. */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
%
% The format of the DestroyKernelInfo method is:
%
%     KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Destroy the entire kernel list: recurse down the 'next' chain first. */
  if (kernel->next != (KernelInfo *) NULL)
    kernel->next=DestroyKernelInfo(kernel->next);
  /* Release the value array, then the structure itself.  Callers in this
     file rely on the NULL result (e.g. return(DestroyKernelInfo(k)) on
     failure paths), allowing the idiom k=DestroyKernelInfo(k). */
  kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
  kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   E x p a n d M i r r o r K e r n e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
%  sequence of 90-degree rotated kernels but providing a reflected 180
%  rotation, before the -/+ 90-degree rotations.
%
%  This special rotation order produces a better, more symmetrical thinning of
%  objects.
%
%  The format of the ExpandMirrorKernelInfo method is:
%
%      void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row.
  */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  /* In-place horizontal mirror: swap element x with element r, working
     inward from both ends of each row. */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;

  kernel->x = kernel->width - kernel->x - 1;
  /* NOTE(review): 'angle' is not declared anywhere in this function; this
     disabled (#if 0) code would not compile if enabled — confirm the intent
     before ever re-enabling it. */
  angle = fmod(angle+180.0, 360.0);
}
#endif

static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand the single given kernel into a list of 4, appending rotated
     copies in the order described in the header comment above (180 flip,
     then 90 transpose, then a further 180 flop).  If any clone fails the
     expansion stops silently, leaving a shorter (still valid) list. */
  KernelInfo
    *clone, *last;

  last = kernel;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   E x p a n d R o t a t e K e r n e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
%  incrementally by the angle given, until the kernel repeats.
%
%  WARNING: 45 degree rotations only works for 3x3 kernels.
%  While 90 degree rotations only works for linear and square kernels
%
%  The format of the ExpandRotateKernelInfo method is:
%
%      void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if ( kernel1->width != kernel2->width
    || kernel1->height != kernel2->height
    || kernel1->x != kernel2->x
    || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence: a Nan entry only matches another Nan. */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent (within epsilon).  Two Nans pass
       this check too, since comparisons against Nan are always false. */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}

static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  /* Repeatedly clone the last kernel in the list, rotate the clone by
     'angle', and append it — until a rotation reproduces the first kernel
     again (or a clone fails), at which point the surplus clone is junked. */
  KernelInfo
    *clone_info,
    *last;

  last=kernel;
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a l c K e r n e l M e t a D a t a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels). This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
    {
      /* Flush near-zero values to exactly zero before accumulating. */
      if ( fabs(kernel->values[i]) < MagickEpsilon )
        kernel->values[i] = 0.0;
      /* Accumulate into the negative or positive range total, and track
         the overall minimum/maximum value. */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function, to actually perform a specific task without possible interference
%  by any API user supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ChannelType channel, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeMethod compose,
%        const double bias, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o channel: the channels to which the operations are applied
%               The channel 'sync' flag determines if 'alpha weighting' is
%               applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/ static ssize_t MorphologyPrimitive(const Image *image, Image *result_image, const MorphologyMethod method, const ChannelType channel, const KernelInfo *kernel,const double bias,ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *p_view, *q_view; register ssize_t i; size_t *changes, changed, virt_width; ssize_t y, offx, offy; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(result_image != (Image *) NULL); assert(result_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; p_view=AcquireVirtualCacheView(image,exception); q_view=AcquireAuthenticCacheView(result_image,exception); virt_width=image->columns+kernel->width-1; /* Some methods (including convolve) needs use a reflected kernel. * Adjust 'origin' offsets to loop though kernel as a reflection. 
*/ offx = kernel->x; offy = kernel->y; switch(method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: /* kernel needs to used with reflection about origin */ offx = (ssize_t) kernel->width-offx-1; offy = (ssize_t) kernel->height-offy-1; break; case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* kernel is used as is, without reflection */ break; default: assert("Not a Primitive Morphology Method" != (char *) NULL); break; } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changes[i]=0; if ( method == ConvolveMorphology && kernel->width == 1 ) { /* Special handling (for speed) of vertical (blur) kernels. ** This performs its handling in columns rather than in rows. 
** This is only done for convolve as it is the only method that ** generates very large 1-D vertical kernels (such as a 'BlurKernel') ** ** Timing tests (on single CPU laptop) ** Using a vertical 1-d Blue with normal row-by-row (below) ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.807u ** Using this column method ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.620u ** ** Anthony Thyssen, 14 June 2010 */ register ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,result_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const PixelPacket *magick_restrict p; register const IndexPacket *magick_restrict p_indexes; register PixelPacket *magick_restrict q; register IndexPacket *magick_restrict q_indexes; register ssize_t y; ssize_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1, exception); q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = offy; for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket result; register ssize_t v; register const double *magick_restrict k; register const PixelPacket *magick_restrict k_pixels; register const IndexPacket *magick_restrict k_indexes; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r)); /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+y; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. 
** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; result.red += (*k)*GetPixelRed(k_pixels); result.green += (*k)*GetPixelGreen(k_pixels); result.blue += (*k)*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += (*k)*(*k_indexes); k--; k_pixels++; k_indexes++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+y,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double gamma; /* divisor, sum of color alpha weighting */ MagickRealType alpha; /* alpha weighting for colors : alpha */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*GetPixelRed(k_pixels); result.green += alpha*GetPixelGreen(k_pixels); result.blue += alpha*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += alpha*(*k_indexes); k--; k_pixels++; k_indexes++; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelRed(q,ClampToQuantum(gamma*result.red)); SetPixelGreen(q,ClampToQuantum(gamma*result.green)); SetPixelBlue(q,ClampToQuantum(gamma*result.blue)); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index)); } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q)) || ( p[r].green != GetPixelGreen(q)) || ( p[r].blue != GetPixelBlue(q)) || ( (image->matte != MagickFalse) && (p[r].opacity != GetPixelOpacity(q))) || ( (image->colorspace == CMYKColorspace) && (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) ) changes[id]++; p++; q++; } /* y */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* x */ 
result_image->type=image->type; q_view=DestroyCacheView(q_view); p_view=DestroyCacheView(p_view); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changed+=changes[i]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* ** Normal handling of horizontal or rectangular kernels (row by row) */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,result_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const PixelPacket *magick_restrict p; register const IndexPacket *magick_restrict p_indexes; register PixelPacket *magick_restrict q; register IndexPacket *magick_restrict q_indexes; register ssize_t x; size_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width, kernel->height, exception); q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = virt_width*offy + offx; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t v; register ssize_t u; register const double *magick_restrict k; register const PixelPacket *magick_restrict k_pixels; register const IndexPacket *magick_restrict k_indexes; DoublePixelPacket result, min, max; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r)); /* Defaults */ min.red = min.green = min.blue = min.opacity = min.index = (double) QuantumRange; max.red = max.green = max.blue = max.opacity = max.index = 0.0; /* default result is the original pixel value */ result.red = (double) p[r].red; result.green = (double) p[r].green; result.blue = (double) p[r].blue; result.opacity = QuantumRange - (double) p[r].opacity; result.index = 0.0; if ( image->colorspace == CMYKColorspace) result.index = (double) GetPixelIndex(p_indexes+x+r); switch (method) { case ConvolveMorphology: /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; break; case DilateIntensityMorphology: case ErodeIntensityMorphology: /* use a boolean flag indicating when first match found */ result.red = 0.0; /* result is not used otherwise */ break; default: break; } switch ( method ) { case ConvolveMorphology: /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** Correlation is actually the same as this but without reflecting ** the kernel, and thus 'lower-level' that Convolution. 
However ** as Convolution is the more common method used, and it does not ** really cost us much in terms of processing to use a reflected ** kernel, so it is Convolution that is implemented. ** ** Correlation will have its kernel reflected before calling ** this function to do a Convolve. ** ** For more details of Correlation vs Convolution see ** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. ** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; result.red += (*k)*k_pixels[u].red; result.green += (*k)*k_pixels[u].green; result.blue += (*k)*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index += (*k)*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum((MagickRealType) result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double alpha, /* alpha weighting for colors : alpha */ gamma; /* divisor, sum of color alpha weighting */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*k_pixels[u].red; result.green += alpha*k_pixels[u].green; result.blue += alpha*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index+=alpha*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue))); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma* result.index))); } break; case ErodeMorphology: /* Minimum Value within kernel neighbourhood ** ** NOTE that the kernel is not reflected for this operation! ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex(k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateMorphology: /* Maximum Value within kernel neighbourhood ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. ** */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* Minimum of Foreground Pixel minus Maxumum of Background Pixels ** ** NOTE that the kernel is not reflected for this operation, ** and consists of both foreground and background pixel ** neighbourhoods, 0.0 for background, and 1.0 for foreground ** with either Nan or 0.5 values for don't care. 
** ** Note that this will never produce a meaningless negative ** result. Such results can cause Thinning/Thicken to not work ** correctly when used against a greyscale image. */ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) ) continue; if ( (*k) > 0.7 ) { /* minimim of foreground pixels */ Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } else if ( (*k) < 0.3 ) { /* maximum of background pixels */ Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } } k_pixels += virt_width; k_indexes += virt_width; } /* Pattern Match if difference is positive */ min.red -= max.red; Maximize( min.red, 0.0 ); min.green -= max.green; Maximize( min.green, 0.0 ); min.blue -= max.blue; Maximize( min.blue, 0.0 ); min.opacity -= max.opacity; Maximize( min.opacity, 0.0 ); min.index -= max.index; Maximize( min.index, 0.0 ); break; case ErodeIntensityMorphology: /* Select Pixel with Minimum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity. ** ** NOTE that the kernel is not reflected for this operation! 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateIntensityMorphology: /* Select Pixel with Maximum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity (yet). ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */ if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case IterativeDistanceMorphology: /* Work out an iterative distance from black edge of a white image ** shape. Essentually white values are decreased to the smallest ** 'distance from edge' it can find. ** ** It works by adding kernel values to the neighbourhood, and and ** select the minimum value found. The kernel is rotated before ** use, so kernel distances match resulting distances, when a user ** provided asymmetric kernel is applied. 
** ** ** This code is almost identical to True GrayScale Morphology But ** not quite. ** ** GreyDilate Kernel values added, maximum value found Kernel is ** rotated before use. ** ** GrayErode: Kernel values subtracted and minimum value found No ** kernel rotation used. ** ** Note the the Iterative Distance method is essentially a ** GrayErode, but with negative kernel values, and kernel ** rotation applied. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; Minimize(result.red, (*k)+k_pixels[u].red); Minimize(result.green, (*k)+k_pixels[u].green); Minimize(result.blue, (*k)+k_pixels[u].blue); Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case UndefinedMorphology: default: break; /* Do nothing */ } /* Final mathematics of results (combine with original image?) ** ** NOTE: Difference Morphology operators Edge* and *Hat could also ** be done here but works better with iteration as a image difference ** in the controlling function (below). Thicken and Thinning however ** should be done here so thay can be iterated correctly. 
*/ switch ( method ) { case HitAndMissMorphology: case ErodeMorphology: result = min; /* minimum of neighbourhood */ break; case DilateMorphology: result = max; /* maximum of neighbourhood */ break; case ThinningMorphology: /* subtract pattern match from original */ result.red -= min.red; result.green -= min.green; result.blue -= min.blue; result.opacity -= min.opacity; result.index -= min.index; break; case ThickenMorphology: /* Add the pattern matchs to the original */ result.red += min.red; result.green += min.green; result.blue += min.blue; result.opacity += min.opacity; result.index += min.index; break; default: /* result directly calculated or assigned */ break; } /* Assign the resulting pixel values - Clamping Result */ switch ( method ) { case UndefinedMorphology: case ConvolveMorphology: case DilateIntensityMorphology: case ErodeIntensityMorphology: break; /* full pixel was directly assigned - not a channel method */ default: if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if ((channel & OpacityChannel) != 0 && image->matte != MagickFalse ) SetPixelAlpha(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); break; } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q) ) || ( p[r].green != GetPixelGreen(q) ) || ( p[r].blue != GetPixelBlue(q) ) || ( (image->matte != MagickFalse) && (p[r].opacity != GetPixelOpacity(q))) || ( (image->colorspace == CMYKColorspace) && (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) ) changes[id]++; p++; q++; } /* x */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  /* sum the per-thread change counters into a single total */
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t)changed : -1);
}

/* This is almost identical to the MorphologyPrimative() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
/*
** MorphologyPrimitiveDirect() applies an iterative distance-style
** morphology primitive (DistanceMorphology or VoronoiMorphology) in-place
** on 'image', restricted to the given 'channel's.
**
** It makes two sequential sweeps over the image: top-down using the top
** half of the (reflected) kernel, then bottom-up using the bottom half,
** each time also folding in the pixels of the current row that were just
** written.  Pixels are read through a virtual-pixel cache view and written
** through an authentic view of the SAME image, so results propagate row to
** row.  This data dependency is why the loops must NOT be parallelized.
**
** Returns the number of pixels changed, or -1 on error (with 'exception'
** set by the failing cache operation).
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,
    *virt_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    changed, virt_width;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Some methods (including convolve) needs use a reflected kernel.
   * Adjust 'origin' offsets to loop though kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* always-false assert: flags an unsupported method at debug time */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  /* width of the virtual pixel region: one full kernel overhang per row */
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down sweep */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only top half of kernel is processed as we do a single pass downward
    ** through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width,
      (size_t) offy+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults: seed result from the current output pixel */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        /* work with alpha (QuantumRange-opacity) rather than opacity */
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          /* walk the kernel backwards (reflected form) over the top half */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, while copying the color
          ** values of the closest pixel.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              /* closer source found: take its whole color, add distance */
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          /* Voronoi assigns the whole pixel - no channel selection */
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q))) ||
           ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p++; /* increment pixel buffers */
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if (SetImageProgress(image,MorphologyTag,progress,image->rows)
              == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  /* Do the reversed pass through the image */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processes as we
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width,
      (size_t) kernel->y+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          /* bottom half of the reflected kernel only, on this upward pass */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q))) ||
           ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p--; /* go backward through pixel buffers */
      q--;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if ( SetImageProgress(image,MorphologyTag,progress,image->rows)
              == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex grue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,
  const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number of pixels changed by last primitive operation */

  char
    v_info[MaxTextExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;   /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);  /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsMagickTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;        /* just do method once, unless otherwise set */
  stage_limit = 1;         /* assume method is not a compound */
  special = MagickFalse;   /* assume it is NOT a direct modify primitive */
  rslt_compose = compose;  /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&rslt_image->exception);
          goto error_cleanup;
        }

      /* Distance/Voronoi modify the clone in place (two-pass scan). */
      changed = MorphologyPrimitiveDirect(rslt_image, method,
                      channel, kernel, exception);

      if ( verbose != MagickFalse )
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
        (void) CompositeImageChannel(rslt_image, DefaultChannels,
          CopyOpacityCompositeOp, image, 0, 0);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;   /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel;  /* default use unreflected kernel */
        primitive = method;         /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:     /* just erode */
          case EdgeInMorphology:    /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:    /* just dilate */
          case EdgeOutMorphology:   /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:      /* erode then dilate */
          case TopHatMorphology:    /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:     /* dilate, then erode */
          case BottomHatMorphology: /* close and image difference */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:    /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:      /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;  /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if ( verbose != MagickFalse ) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;   /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
                {
                  InheritException(exception,&work_image->exception);
                  goto error_cleanup;
                }
              /* work_image->type=image->type; ??? */
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       channel, this_kernel, bias, exception);

          if ( verbose != MagickFalse ) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;  /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL;  /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if ( verbose != MagickFalse && stage_loop < stage_limit )
          (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",
              CommandOptionToMnemonic(MagickMorphologyOptions, method));
          (void) CompositeImageChannel(curr_image,(ChannelType)
            (channel & ~SyncChannels),DifferenceCompositeOp,image,0,0);
          break;
        case EdgeMorphology:
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",
              CommandOptionToMnemonic(MagickMorphologyOptions,method));
          (void) CompositeImageChannel(curr_image,(ChannelType)
            (channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0);
          save_image = DestroyImage(save_image);  /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;  /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if ( verbose != MagickFalse ) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image;  /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImageChannel(rslt_image,
            (ChannelType) (channel & ~SyncChannels), rslt_compose,
            curr_image, 0, 0);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if ( verbose != MagickFalse )
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e C h a n n e l                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImageChannel() applies a user supplied kernel to the image
%  according to the given morphology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
% * Output Bias for Convolution and correlation ("-bias" or "-define convolve:bias=??") % * Kernel Scale/normalize settings ("-set 'option:convolve:scale'") % This can also includes the addition of a scaled unity kernel. % * Show Kernel being applied ("-set option:showKernel 1") % % The format of the MorphologyImage method is: % % Image *MorphologyImage(const Image *image,MorphologyMethod method, % const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception) % % Image *MorphologyImageChannel(const Image *image, const ChannelType % channel,MorphologyMethod method,const ssize_t iterations, % KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o channel: the channel type. % % o kernel: An array of double representing the morphology kernel. % Warning: kernel may be normalized for the Convolve method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod method,const ssize_t iterations, const KernelInfo *kernel,ExceptionInfo *exception) { Image *morphology_image; morphology_image=MorphologyImageChannel(image,DefaultChannels,method, iterations,kernel,exception); return(morphology_image); } MagickExport Image *MorphologyImageChannel(const Image *image, const ChannelType channel,const MorphologyMethod method, const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception) { KernelInfo *curr_kernel; CompositeOperator compose; double bias; Image *morphology_image; /* Apply Convolve/Correlate Normalization and Scaling Factors. 
* This is done BEFORE the ShowKernelInfo() function is called so that * users can see the results of the 'option:convolve:scale' option. */ curr_kernel = (KernelInfo *) kernel; bias=image->bias; if ((method == ConvolveMorphology) || (method == CorrelateMorphology)) { const char *artifact; artifact = GetImageArtifact(image,"convolve:bias"); if (artifact != (const char *) NULL) bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0); artifact = GetImageArtifact(image,"convolve:scale"); if ( artifact != (const char *) NULL ) { if ( curr_kernel == kernel ) curr_kernel = CloneKernelInfo(kernel); if (curr_kernel == (KernelInfo *) NULL) { curr_kernel=DestroyKernelInfo(curr_kernel); return((Image *) NULL); } ScaleGeometryKernelInfo(curr_kernel, artifact); } } /* display the (normalized) kernel via stderr */ if ( IsMagickTrue(GetImageArtifact(image,"showKernel")) || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel")) || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) ) ShowKernelInfo(curr_kernel); /* Override the default handling of multi-kernel morphology results * If 'Undefined' use the default method * If 'None' (default for 'Convolve') re-iterate previous result * Otherwise merge resulting images using compose method given. * Default for 'HitAndMiss' is 'Lighten'. 
*/ { const char *artifact; compose = UndefinedCompositeOp; /* use default for method */ artifact = GetImageArtifact(image,"morphology:compose"); if ( artifact != (const char *) NULL) compose = (CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,artifact); } /* Apply the Morphology */ morphology_image = MorphologyApply(image, channel, method, iterations, curr_kernel, compose, bias, exception); /* Cleanup and Exit */ if ( curr_kernel != kernel ) curr_kernel=DestroyKernelInfo(curr_kernel); return(morphology_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateKernelInfo() rotates the kernel by the angle given. % % Currently it is restricted to 90 degree angles, of either 1D kernels % or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels. % It will ignore usless rotations for specific 'named' built-in kernels. % % The format of the RotateKernelInfo method is: % % void RotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is currently internal to this module only, but can be exported % to other modules if needed. */ static void RotateKernelInfo(KernelInfo *kernel, double angle) { /* angle the lower kernels first */ if ( kernel->next != (KernelInfo *) NULL) RotateKernelInfo(kernel->next, angle); /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical ** ** TODO: expand beyond simple 90 degree rotates, flips and flops */ /* Modulus the angle */ angle = fmod(angle, 360.0); if ( angle < 0 ) angle += 360.0; if ( 337.5 < angle || angle <= 22.5 ) return; /* Near zero angle - no change! 
- At least not at this time */ /* Handle special cases */ switch (kernel->type) { /* These built-in kernels are cylindrical kernels, rotating is useless */ case GaussianKernel: case DoGKernel: case LoGKernel: case DiskKernel: case PeaksKernel: case LaplacianKernel: case ChebyshevKernel: case ManhattanKernel: case EuclideanKernel: return; /* These may be rotatable at non-90 angles in the future */ /* but simply rotating them in multiples of 90 degrees is useless */ case SquareKernel: case DiamondKernel: case PlusKernel: case CrossKernel: return; /* These only allows a +/-90 degree rotation (by transpose) */ /* A 180 degree rotation is useless */ case BlurKernel: if ( 135.0 < angle && angle <= 225.0 ) return; if ( 225.0 < angle && angle <= 315.0 ) angle -= 180; break; default: break; } /* Attempt rotations by 45 degrees -- 3x3 kernels only */ if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 ) { if ( kernel->width == 3 && kernel->height == 3 ) { /* Rotate a 3x3 square by 45 degree angle */ double t = kernel->values[0]; kernel->values[0] = kernel->values[3]; kernel->values[3] = kernel->values[6]; kernel->values[6] = kernel->values[7]; kernel->values[7] = kernel->values[8]; kernel->values[8] = kernel->values[5]; kernel->values[5] = kernel->values[2]; kernel->values[2] = kernel->values[1]; kernel->values[1] = t; /* rotate non-centered origin */ if ( kernel->x != 1 || kernel->y != 1 ) { ssize_t x,y; x = (ssize_t) kernel->x-1; y = (ssize_t) kernel->y-1; if ( x == y ) x = 0; else if ( x == 0 ) x = -y; else if ( x == -y ) y = 0; else if ( y == 0 ) y = x; kernel->x = (ssize_t) x+1; kernel->y = (ssize_t) y+1; } angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */ kernel->angle = fmod(kernel->angle+45.0, 360.0); } else perror("Unable to rotate non-3x3 kernel by 45 degrees"); } if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 ) { if ( kernel->width == 1 || kernel->height == 1 ) { /* Do a transpose of a 1 dimensional kernel, ** which results in a 
fast 90 degree rotation of some type. */ ssize_t t; t = (ssize_t) kernel->width; kernel->width = kernel->height; kernel->height = (size_t) t; t = kernel->x; kernel->x = kernel->y; kernel->y = t; if ( kernel->width == 1 ) { angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else { angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */ kernel->angle = fmod(kernel->angle+270.0, 360.0); } } else if ( kernel->width == kernel->height ) { /* Rotate a square array of values by 90 degrees */ { register size_t i,j,x,y; register double *k,t; k=kernel->values; for( i=0, x=kernel->width-1; i<=x; i++, x--) for( j=0, y=kernel->height-1; j<y; j++, y--) { t = k[i+j*kernel->width]; k[i+j*kernel->width] = k[j+x*kernel->width]; k[j+x*kernel->width] = k[x+y*kernel->width]; k[x+y*kernel->width] = k[y+i*kernel->width]; k[y+i*kernel->width] = t; } } /* rotate the origin - relative to center of array */ { register ssize_t x,y; x = (ssize_t) (kernel->x*2-kernel->width+1); y = (ssize_t) (kernel->y*2-kernel->height+1); kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2; kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2; } angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else perror("Unable to rotate a non-square, non-linear kernel 90 degrees"); } if ( 135.0 < angle && angle <= 225.0 ) { /* For a 180 degree rotation - also know as a reflection * This is actually a very very common operation! * Basically all that is needed is a reversal of the kernel data! 
* And a reflection of the origon */ double t; register double *k; size_t i, j; k=kernel->values; for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--) t=k[i], k[i]=k[j], k[j]=t; kernel->x = (ssize_t) kernel->width - kernel->x - 1; kernel->y = (ssize_t) kernel->height - kernel->y - 1; angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */ kernel->angle = fmod(kernel->angle+180.0, 360.0); } /* At this point angle should at least between -45 (315) and +45 degrees * In the future some form of non-orthogonal angled rotates could be * performed here, posibily with a linear kernel restriction. */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e G e o m e t r y K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleGeometryKernelInfo() takes a geometry argument string, typically % provided as a "-set option:convolve:scale {geometry}" user setting, % and modifies the kernel according to the parsed arguments of that setting. % % The first argument (and any normalization flags) are passed to % ScaleKernelInfo() to scale/normalize the kernel. The second argument % is then passed to UnityAddKernelInfo() to add a scled unity kernel % into the scaled/normalized kernel. % % The format of the ScaleGeometryKernelInfo method is: % % void ScaleGeometryKernelInfo(KernelInfo *kernel, % const double scaling_factor,const MagickStatusType normalize_flags) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to modify % % o geometry: % The geometry string to parse, typically from the user provided % "-set option:convolve:scale {geometry}" setting. 
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
     const char *geometry)
{
  GeometryFlags
    flags;

  GeometryInfo
    args;

  /* Parse "{rho}[x{sigma}][%][!][^]" style geometry into args/flags. */
  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  if ( (flags & PercentValue) != 0 )      /* Handle Percentage flag*/
    args.rho *= 0.01,  args.sigma *= 0.01;

  if ( (flags & RhoValue) == 0 )          /* Set Defaults for missing args */
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* Scale/Normalize the input kernel */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* Add Unity Kernel, for blending with original */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.
Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the  +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register ssize_t i; register double pos_scale, neg_scale; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if ( ! IsNaN(kernel->values[i]) ) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'showKernel' option request. % % The format of the ShowKernelInfo method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickExport void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if ( IsNaN(k->values[i]) ) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  register KernelInfo
    *k;

  /*
    Walk the whole multi-kernel list iteratively; each kernel is updated
    independently, so per-kernel order is irrelevant.  For each kernel the
    scaled 'Unity' kernel (a single origin pixel) is added, then the kernel's
    cached meta-data is refreshed.
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simply
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%       void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    j;

  register KernelInfo
    *k;

  /*
    Iterate over every kernel in the multi-kernel list (the recursion of the
    original is replaced by an explicit list walk) and zero any 'nan'
    placeholder entries in each kernel's value array.
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (j=0; j < (k->width*k->height); j++)
      if ( IsNaN(k->values[j]) )
        k->values[j] = 0.0;
  return;
}
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif

/*
  Typedef declarations.

  A ResizeFilter bundles the weighting function, its (optional) windowing
  function, and the pre-computed scaling parameters that together define a
  resize/resample filter.
*/
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic spline filters */

  size_t
    signature;
};

/*
  Forward declarations.
*/
static MagickRealType
  I0(MagickRealType x),
  BesselOrderOne(MagickRealType),
  Sinc(const MagickRealType, const ResizeFilter *),
  SincFast(const MagickRealType, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealtype *FilterName(const MagickRealType x,
%        const MagickRealType support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%         support.  The GetResizeFilterWeight() ensures this a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%        access support, and possibly other pre-calculated information defining
%        the functions.
%
*/

/* pi as a long-double literal, used by the trigonometric filter windows */
#define MagickPIL ((MagickRealType) 3.14159265358979323846264338327950288420L)

static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".  Returns the limit value
    pi/2 at x == 0 to avoid the 0/0 division.
  */
  if (x == 0.0)
    return(0.5*MagickPIL);
  return(BesselOrderOne(MagickPIL*x)/x);
}

static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops (uses cos(2 pi x) = 2 cos^2(pi x) - 1).
  */
  const MagickRealType cospix = cos((double) (MagickPIL*x));
  return(0.34+cospix*(0.5+cospix*0.16));
}

static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1 (so
    that we know that sin(pi x) >= 0).
  */
  const double cospix = cos((double) (MagickPIL*x));
  const double sinpix = sqrt(1.0-cospix*cospix);
  return((1.0-x)*cospix+(1.0/MagickPIL)*sinpix);
}

static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A Box filter is a equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B= 1/3 C= 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B= 0   C= 1/2  Interpolatory and exact on linears
       Cubic B-Spline      B= 1   C= 0    Spline approximation of Gaussian
       Hermite             B= 0   C= 0    Spline with small support (= 1)

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
    Note P1 is always zero, hence the missing linear term below.
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a fixed sigma = 1/2

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / sqrt(2*PI)sigma^2))
    The constants are pre-calculated...
        exp( -coeff[0]*(x^2)) ) * coeff[1]
    However the multiplier coefficient (1) is not needed and not used.

    Gaussian Formula (2D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (PI*sigma^2) )
    Note that it is only a change in the normalization multiplier
    which is not needed or used when gaussian is used as a filter.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[0]*x*x)));
}

static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      .5+.5cos(pi x).
  */
  const MagickRealType cospix = cos((double) (MagickPIL*x));
  return(0.5+0.5*cospix);
}

static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
     .54 + .46 cos(pi x).
  */
  const MagickRealType cospix = cos((double) (MagickPIL*x));
  return(0.54+0.46*cospix);
}

static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing)

    Alpha (c[0]) is a free value from 5 to 8 (defaults to 6.5).
    A scaling factor (c[1]) is not needed as filter is normalized.
  */
  return(resize_filter->coefficient[1]*
    I0(resize_filter->coefficient[0]*sqrt((double) (1.0-x*x))));
}

static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  MagickRealType
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: N is the 'order' of the
    lagrange function and depends on the overall support window size of the
    filter.  That is: for a support of 2, it gives a lagrange-4 (piecewise
    cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  /*n=(ssize_t)((1.0*order)/2.0+x);     -- which piece does x belong to */
  n = (ssize_t)(resize_filter->window_support + x);
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const MagickRealType pix = (MagickRealType) (MagickPIL*x);
      return(sin((double) pix)/pix);
    }
  return((MagickRealType) 1.0);
}

static MagickRealType SincFast(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials/Pade approximants, the approximations are computed with a
    completely different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops).  More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/math/doc/...
    ...sf_and_dist/html/math_toolkit/backgrounders/remez.html
  */
  /*
    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      const MagickRealType pix = (MagickRealType) (MagickPIL*x);
      return(sin((double) pix)/pix);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const MagickRealType c0 = 0.173610016489197553621906385078711564924e-2L;
    const MagickRealType c1 = -0.384186115075660162081071290162149315834e-3L;
    const MagickRealType c2 = 0.393684603287860108352720146121813443561e-4L;
    const MagickRealType c3 = -0.248947210682259168029030370205389323899e-5L;
    const MagickRealType c4 = 0.107791837839662283066379987646635416692e-6L;
    const MagickRealType c5 = -0.324874073895735800961260474028013982211e-8L;
    const MagickRealType c6 = 0.628155216606695311524920882748052490116e-10L;
    const MagickRealType c7 = -0.586110644039348333520104379959307242711e-12L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const MagickRealType c0 = 0.173611107357320220183368594093166520811e-2L;
    const MagickRealType c1 = -0.384240921114946632192116762889211361285e-3L;
    const MagickRealType c2 = 0.394201182359318128221229891724947048771e-4L;
    const MagickRealType c3 = -0.250963301609117217660068889165550534856e-5L;
    const MagickRealType c4 = 0.111902032818095784414237782071368805120e-6L;
    const MagickRealType c5 = -0.372895101408779549368465614321137048875e-8L;
    const MagickRealType c6 = 0.957694196677572570319816780188718518330e-10L;
    const MagickRealType c7 = -0.187208577776590710853865174371617338991e-11L;
    const MagickRealType c8 = 0.253524321426864752676094495396308636823e-13L;
    const MagickRealType c9 = -0.177084805010701112639035485248501049364e-15L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const MagickRealType c0 = 0.173611111110910715186413700076827593074e-2L;
    const MagickRealType c1 = -0.289105544717893415815859968653611245425e-3L;
    const MagickRealType c2 = 0.206952161241815727624413291940849294025e-4L;
    const MagickRealType c3 = -0.834446180169727178193268528095341741698e-6L;
    const MagickRealType c4 = 0.207010104171026718629622453275917944941e-7L;
    const MagickRealType c5 = -0.319724784938507108101517564300855542655e-9L;
    const MagickRealType c6 = 0.288101675249103266147006509214934493930e-11L;
    const MagickRealType c7 = -0.118218971804934245819960233886876537953e-13L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const MagickRealType d0 = 1.0L;
    const MagickRealType d1 = 0.547981619622284827495856984100563583948e-1L;
    const MagickRealType d2 = 0.134226268835357312626304688047086921806e-2L;
    const MagickRealType d3 = 0.178994697503371051002463656833597608689e-4L;
    const MagickRealType d4 = 0.114633394140438168641246022557689759090e-6L;
    const MagickRealType q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Cubic Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Hanning Hamming % Kaiser Lanczos % % Special purpose Filters % SincFast LanczosSharp Lanczos2D Lanczos2DSharp Robidoux % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximatally 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). % The Sinc version is the most popular windowed filter. 
% % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of both Mitchell and Lanczos2Sharp. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resize ro distorted) image. % % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. 
% % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specifed % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range suppiled to the caller. % If unset this will equal the normal filter support size. % % "filter:blur" Scale the filter and support window by this amount. A value % > 1 will generally result in a more burred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. 
Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic type of filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterTypes filter_type, const MagickBooleanType radial, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. % */ MagickExport ResizeFilter *AcquireResizeFilter(const Image *image, const FilterTypes filter,const MagickRealType blur, const MagickBooleanType cylindrical,ExceptionInfo *exception) { const char *artifact; FilterTypes filter_type, window_type; MagickRealType B, C, value; register ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical", unless a 'Sinc' or 'SincFast' filter was specifically requested. 
WARNING: The order of this tabel must match the order of the FilterTypes enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" setting. */ static struct { FilterTypes filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HanningFilter }, /* Hanning -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* Cubic B-Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { SincFastFilter, WelshFilter }, /* Welsh -- parabolic-sinc */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter,Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ }; /* Table 
mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { MagickRealType (*function)(const MagickRealType, const ResizeFilter*), lobes, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */ } const filters[SentinelFilter] = { { Box, 0.5, 0.5, 0.0, 0.0 }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0 }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0 }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0 }, /* Hermite (cubic B=C=0) */ { Hanning, 1.0, 1.0, 0.0, 0.0 }, /* Hanning, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0 }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0 }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0 }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0 }, /* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Cubic B-Spline (B=1,C=0) */ { CubicBC, 2.0, 1.0, 0.0, 0.5 }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3. 
}, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0 }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0 }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0 }, /* Kaiser (square root window) */ { Welsh, 1.0, 1.0, 0.0, 0.0 }, /* Welsh (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0 }, /* Bohman, 2*Cosine window */ { Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0 }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0 }, /* lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos2, sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067 } /* Robidoux: Keys cubic close to Lanczos2D sharpened */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. */ static MagickRealType jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.244759868719957, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.2475085630373, 16.247661874700962 }; /* Allocate resize filter. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter)); if (resize_filter == (ResizeFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur = blur; /* function argument blur factor */ /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if (cylindrical != MagickFalse && filter_type == SincFastFilter && filter != SincFastFilter ) filter_type=JincFilter; /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterTypes) option; window_type=BoxFilter; } /* Filter override with a specific window function. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterTypes) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse, artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type=cylindrical != MagickFalse ? 
JincFilter : SincFastFilter; window_type=(FilterTypes) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].lobes; resize_filter->window=filters[window_type].function; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(MagickRealType) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; default: break; } /* ** Other Expert Option Modifications */ /* User Gaussian Sigma Override - no support change */ value = 0.5; /* guassian sigma default, half pixel */ if ( GaussianFilter ) { artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=1.0/(2.0*value*value); /* X scaling */ resize_filter->coefficient[1]=(MagickRealType) (1.0/(Magick2PI*value* value)); /* normalization */ } /* User Kaiser Alpha Override - no support change */ if ( KaiserFilter ) { value=6.5; /* default alpha value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char 
**) NULL); /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* X scaling */ resize_filter->coefficient[1]=1.0/I0(value); /* normalization */ } /* Blur Override */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(MagickRealType) MagickEpsilon; /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(MagickRealType) lobes; } /* Convert a Jinc function lobes value to a real support value */ if (resize_filter->filter == Jinc) { if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support = jinc_zeros[((long)resize_filter->support)-1]; } /* expert override of the support setting */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale /= resize_filter->window_support; /* * Set Cubic Spline B,C values, calculate Cubic coefficients. 
*/ B=0.0; C=0.0; if ((filters[filter_type].function == CubicBC) || (filters[window_type].function == CubicBC)) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ { const double twoB = B+B; resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif artifact=GetImageArtifact(image,"filter:verbose"); if (IsMagickTrue(artifact)) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. 
*/ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. */ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout,"# Resize Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions, window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# win-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale_blur = %.*g\n", GetMagickPrecision(), (double)resize_filter->blur); if ( filter_type == GaussianFilter ) (void) FormatLocaleFile(stdout,"# gaussian_sigma = %.*g\n", GetMagickPrecision(), (double)value); if ( filter_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser_alpha = %.*g\n", GetMagickPrecision(), (double)value); (void) FormatLocaleFile(stdout,"# practical_support = %.*g\n", GetMagickPrecision(), (double)support); if ( filter_type == CubicFilter || window_type == CubicFilter ) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter 
result.
  */
  for (x=0.0; x <= support; x+=0.01f)
    (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(),
      (double) GetResizeFilterWeight(resize_filter,x));
  /*
    A final value so gnuplot can graph the 'stop' properly.
  */
  (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
    GetMagickPrecision(),0.0);
  }
  /* Output the above once only for each image - remove setting */
  (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define AdaptiveResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Adaptively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Degenerate geometry returns no image; a no-op geometry is a clone. */
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      continue;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    /*
      Map the center of each destination pixel back into the source image
      and resample with mesh interpolation.
    */
    offset.y=((MagickRealType) (y+0.5)*image->rows/resize_image->rows);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) (x+0.5)*image->columns/resize_image->columns);
      (void) InterpolateMagickPixelPacket(image,image_view,
        MeshInterpolatePixel,offset.x-0.5,offset.y-0.5,&pixel,exception);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      continue;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveResizeImage)
#endif
        proceed=SetImageProgress(image,AdaptiveResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 0.  This is used to create the Jinc() filter function below.
%
%    Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%       j1(x) = x*j1(x);
%
%    For x in (8,inf)
%
%       j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%    where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%       cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%               =  1/sqrt(2) * (sin(x) - cos(x))
%       sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%               = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      MagickRealType BesselOrderOne(MagickRealType x)
%
%  A description of each parameter follows:
%
%    o x: MagickRealType value.
%
*/

#undef I0
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  /* Series summation; each term is the previous scaled by y/(i*i). */
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((MagickRealType) i*i);
  }
  return(sum);
}

#undef J1
static MagickRealType J1(MagickRealType x)
{
  /*
    Rational (polynomial-ratio) approximation of J1 for |x| <= 8,
    evaluated by Horner's scheme in x*x.
  */
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static MagickRealType P1(MagickRealType x)
{
  /*
    Asymptotic-expansion helper for J1 when x > 8; rational function in
    (8/x)^2 evaluated by Horner's scheme.
  */
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  /*
    Second asymptotic-expansion helper for J1 when x > 8; rational
    function in (8/x)^2 evaluated by Horner's scheme.
  */
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    p,
    q;

  if (x == 0.0)
    return(0.0);
  /* Remember the sign: j1(-x) == -j1(x). */
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  /* Large-argument asymptotic form; see the method comment above. */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* Invalidate the signature so stale pointers are caught by the asserts. */
  resize_filter->signature=(~MagickSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
% % The format of the GetResizeFilterSupport method is: % % MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter) % % A description of each parameter follows: % % o filter: Image filter to use. % */ MagickExport MagickRealType GetResizeFilterSupport( const ResizeFilter *resize_filter) { assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickSignature); return(resize_filter->support*resize_filter->blur); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t R e s i z e F i l t e r W e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetResizeFilterWeight evaluates the specified resize filter at the point x % which usally lies between zero and the filters current 'support' and % returns the weight of the filter function at that point. % % The format of the GetResizeFilterWeight method is: % % MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter, % const MagickRealType x) % % A description of each parameter follows: % % o filter: the filter type. % % o x: the point. % */ MagickExport MagickRealType GetResizeFilterWeight( const ResizeFilter *resize_filter,const MagickRealType x) { MagickRealType scale, weight, x_blur; /* Windowing function - scale the weighting filter by this amount. 
*/ assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickSignature); x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */ if ((resize_filter->window_support < MagickEpsilon) || (resize_filter->window == Box)) scale=1.0; /* Point or Box Filter -- avoid division by zero */ else { scale=resize_filter->scale; scale=resize_filter->window(x_blur*scale,resize_filter); } weight=scale*resize_filter->filter(x_blur,resize_filter); return(weight); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagnifyImage() is a convenience method that scales an image proportionally % to twice its size. % % The format of the MagnifyImage method is: % % Image *MagnifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception) { Image *magnify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); magnify_image=ResizeImage(image,2*image->columns,2*image->rows,CubicFilter, 1.0,exception); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally % to half its size. 
% % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,CubicFilter,1.0, exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resize image in terms of its pixel size, so that when % displayed at the given resolution it will be the same size in terms of % real world units as the original image at the original resolution. % % The format of the ResampleImage method is: % % Image *ResampleImage(Image *image,const double x_resolution, % const double y_resolution,const FilterTypes filter,const double blur, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be resized to fit the given resolution. % % o x_resolution: the new image x resolution. % % o y_resolution: the new image y resolution. % % o filter: Image filter to use. % % o blur: the blur factor where > 1 is blurry, < 1 is sharp. 
% */ MagickExport Image *ResampleImage(const Image *image,const double x_resolution, const double y_resolution,const FilterTypes filter,const double blur, ExceptionInfo *exception) { #define ResampleImageTag "Resample/Image" Image *resample_image; size_t height, width; /* Initialize sampled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=(size_t) (x_resolution*image->columns/(image->x_resolution == 0.0 ? 72.0 : image->x_resolution)+0.5); height=(size_t) (y_resolution*image->rows/(image->y_resolution == 0.0 ? 72.0 : image->y_resolution)+0.5); resample_image=ResizeImage(image,width,height,filter,blur,exception); if (resample_image != (Image *) NULL) { resample_image->x_resolution=x_resolution; resample_image->y_resolution=y_resolution; } return(resample_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image, % const size_t columns,const size_t rows, % const double delta_x,const double rigidity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns, const size_t rows,const double delta_x,const double rigidity, ExceptionInfo *exception) { #define LiquidRescaleImageTag "Rescale/Image" CacheView *rescale_view; const char *map; guchar *packet; Image *rescale_image; int x, y; LqrCarver *carver; LqrRetVal lqr_status; MagickBooleanType status; MagickPixelPacket pixel; unsigned char *pixels; /* Liquid rescale image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); if ((columns <= 2) || (rows <= 2)) return(ResizeImage(image,columns,rows,image->filter,image->blur,exception)); if ((columns >= (2*image->columns)) || (rows >= (2*image->rows))) { Image *resize_image; size_t height, width; /* Honor liquid resize size limitations. 
*/ for (width=image->columns; columns >= (2*width-1); width*=2); for (height=image->rows; rows >= (2*height-1); height*=2); resize_image=ResizeImage(image,width,height,image->filter,image->blur, exception); if (resize_image == (Image *) NULL) return((Image *) NULL); rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x, rigidity,exception); resize_image=DestroyImage(resize_image); return(rescale_image); } map="RGB"; if (image->matte == MagickFalse) map="RGBA"; if (image->colorspace == CMYKColorspace) { map="CMYK"; if (image->matte == MagickFalse) map="CMYKA"; } pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows* strlen(map)*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) return((Image *) NULL); status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel, pixels,exception); if (status == MagickFalse) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map)); if (carver == (LqrCarver *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity); lqr_status=lqr_carver_resize(carver,columns,rows); (void) lqr_status; rescale_image=CloneImage(image,lqr_carver_get_width(carver), lqr_carver_get_height(carver),MagickTrue,exception); if (rescale_image == (Image *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); return((Image *) NULL); } if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse) { InheritException(exception,&rescale_image->exception); rescale_image=DestroyImage(rescale_image); return((Image *) NULL); } GetMagickPixelPacket(rescale_image,&pixel); (void) lqr_carver_scan_reset(carver); rescale_view=AcquireCacheView(rescale_image); while (lqr_carver_scan(carver,&x,&y,&packet) != 0) { register IndexPacket *restrict 
rescale_indexes; register PixelPacket *restrict q; q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view); pixel.red=QuantumRange*(packet[0]/255.0); pixel.green=QuantumRange*(packet[1]/255.0); pixel.blue=QuantumRange*(packet[2]/255.0); if (image->colorspace != CMYKColorspace) { if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[3]/255.0); } else { pixel.index=QuantumRange*(packet[3]/255.0); if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[4]/255.0); } SetPixelPacket(rescale_image,&pixel,q,rescale_indexes); if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse) break; } rescale_view=DestroyCacheView(rescale_view); /* Relinquish resources. */ lqr_carver_destroy(carver); return(rescale_image); } #else MagickExport Image *LiquidRescaleImage(const Image *image, const size_t magick_unused(columns),const size_t magick_unused(rows), const double magick_unused(delta_x),const double magick_unused(rigidity), ExceptionInfo *exception) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError, "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename); return((Image *) NULL); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResizeImage() scales an image to the desired dimensions, using the given % filter (see AcquireFilterInfo()). 
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, an image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,
%        const size_t rows,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.  Typically set
%      this to 1.0.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One (source pixel, filter weight) pair of a destination pixel's
  contribution list.
*/
typedef struct _ContributionInfo
{
  MagickRealType
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release the per-thread contribution lists allocated by
  AcquireContributionThreadSet(); always returns NULL.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishMagickMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one contribution list of 'count' entries per OpenMP thread;
  returns NULL on allocation failure (partial allocations are released).
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  register ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /* Zero the pointer array so a partial failure can be cleaned up safely. */
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) AcquireQuantumMemory(count,
      sizeof(**contribution));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  /* When minifying (x_factor < 1) the filter support widens by 1/x_factor. */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
*/ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); resize_view=AcquireCacheView(resize_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for shared(status) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { MagickRealType center, density; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ContributionInfo *restrict contribution; register IndexPacket *restrict resize_indexes; register PixelPacket *restrict q; register ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; center=(MagickRealType) (x+0.5)/x_factor; start=(ssize_t) MagickMax(center-support+0.5,0.0); stop=(ssize_t) MagickMin(center+support+0.5,(double) image->columns); density=0.0; contribution=contributions[GetOpenMPThreadId()]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-center+0.5)); density+=contribution[n].weight; } if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=1.0/density; for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum( pixel.index)); } } else { MagickRealType gamma; gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale* GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } gamma=1.0/(fabs((double) 
gamma) <= MagickEpsilon ? 1.0 : gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale* GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma* pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(center,(double) start),(double) stop- 1.0)+0.5); j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i-start].pixel-contribution[0].pixel); SetPixelIndex(resize_indexes+y,GetPixelIndex( indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HorizontalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType y_factor, const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception) { CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **restrict contributions; MagickBooleanType status; MagickPixelPacket zero; MagickRealType scale, support; ssize_t y; /* Apply filter to resize 
vertically from image to resize image. */ scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); resize_view=AcquireCacheView(resize_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for shared(status) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickRealType center, density; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ContributionInfo *restrict contribution; register IndexPacket *restrict resize_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; center=(MagickRealType) (y+0.5)/y_factor; start=(ssize_t) MagickMax(center-support+0.5,0.0); stop=(ssize_t) MagickMin(center+support+0.5,(double) image->rows); density=0.0; contribution=contributions[GetOpenMPThreadId()]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-center+0.5)); density+=contribution[n].weight; } if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=1.0/density; for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (x=0; x < (ssize_t) resize_image->columns; x++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum( pixel.index)); } } else { MagickRealType gamma; gamma=0.0; for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale* GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale* GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma* pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(center,(double) start),(double) stop- 1.0)+0.5); j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelIndex(resize_indexes+x, GetPixelIndex(indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_VerticalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterTypes filter,const double blur, ExceptionInfo *exception) { #define WorkLoadFactor 0.265 FilterTypes filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickRealType x_factor, y_factor; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter) && (blur == 1.0)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return(resize_image); /* Acquire resize filter. */ x_factor=(MagickRealType) columns/(MagickRealType) image->columns; y_factor=(MagickRealType) rows/(MagickRealType) image->rows; if ((x_factor*y_factor) > WorkLoadFactor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) return(DestroyImage(resize_image)); filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse, exception); /* Resize image. 
*/ offset=0; if ((x_factor*y_factor) > WorkLoadFactor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x; ssize_t *x_offset, y; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x=0; x < (ssize_t) sample_image->columns; x++) x_offset[x]=(ssize_t) (((MagickRealType) x+0.5)*image->columns/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); sample_view=AcquireCacheView(sample_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict sample_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) (((MagickRealType) y+0.5)*image->rows/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view); /* Sample each column. 
*/ for (x=0; x < (ssize_t) sample_image->columns; x++) *q++=p[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) sample_image->columns; x++) SetPixelIndex(sample_indexes+x, GetPixelIndex(indexes+x_offset[x])); if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SampleImage) #endif proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; Image *scale_image; MagickBooleanType next_column, next_row, proceed; MagickPixelPacket pixel, *scale_scanline, *scanline, *x_vector, *y_vector, zero; MagickRealType alpha; PointInfo scale, span; register ssize_t i; ssize_t number_rows, y; /* Initialize scaled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse) { InheritException(exception,&scale_image->exception); scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. 
*/ x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*scanline)); scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) scale_image->columns,sizeof(*scale_scanline)); y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*y_vector)); if ((scanline == (MagickPixelPacket *) NULL) || (scale_scanline == (MagickPixelPacket *) NULL) || (x_vector == (MagickPixelPacket *) NULL) || (y_vector == (MagickPixelPacket *) NULL)) { scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. */ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) ResetMagickMemory(y_vector,0,(size_t) image->columns* sizeof(*y_vector)); GetMagickPixelPacket(image,&pixel); (void) ResetMagickMemory(&zero,0,sizeof(zero)); i=0; image_view=AcquireCacheView(image); scale_view=AcquireCacheView(scale_image); for (y=0; y < (ssize_t) scale_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict scale_indexes; register MagickPixelPacket *restrict s, *restrict t; register PixelPacket *restrict q; register ssize_t x; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; alpha=1.0; scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view); if (scale_image->rows == image->rows) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte != MagickFalse) alpha=QuantumScale*GetPixelAlpha(p); x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p)); x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p)); x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p)); if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p); if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x)); p++; } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte != MagickFalse) alpha=QuantumScale*GetPixelAlpha(p); x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p)); x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p)); x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p)); if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p); if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) (alpha* GetPixelIndex(indexes+x)); p++; } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) { y_vector[x].red+=scale.y*x_vector[x].red; y_vector[x].green+=scale.y*x_vector[x].green; y_vector[x].blue+=scale.y*x_vector[x].blue; if (scale_image->matte != MagickFalse) y_vector[x].opacity+=scale.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) y_vector[x].index+=scale.y*x_vector[x].index; } span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != 
MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte != MagickFalse) alpha=QuantumScale*GetPixelAlpha(p); x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p)); x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p)); x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p)); if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p); if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) (alpha* GetPixelIndex(indexes+x)); p++; } number_rows++; next_row=MagickFalse; } s=scanline; for (x=0; x < (ssize_t) image->columns; x++) { pixel.red=y_vector[x].red+span.y*x_vector[x].red; pixel.green=y_vector[x].green+span.y*x_vector[x].green; pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue; if (image->matte != MagickFalse) pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index=y_vector[x].index+span.y*x_vector[x].index; s->red=pixel.red; s->green=pixel.green; s->blue=pixel.blue; if (scale_image->matte != MagickFalse) s->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) s->index=pixel.index; s++; y_vector[x]=zero; } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. */ s=scanline; for (x=0; x < (ssize_t) scale_image->columns; x++) { if (scale_image->matte != MagickFalse) alpha=QuantumScale*(QuantumRange-s->opacity); alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 
1.0 : alpha); SetPixelRed(q,ClampToQuantum(alpha*s->red)); SetPixelGreen(q,ClampToQuantum(alpha*s->green)); SetPixelBlue(q,ClampToQuantum(alpha*s->blue)); if (scale_image->matte != MagickFalse) SetPixelOpacity(q,ClampToQuantum(s->opacity)); if (scale_indexes != (IndexPacket *) NULL) SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index)); q++; s++; } } else { /* Scale X direction. */ pixel=zero; next_column=MagickFalse; span.x=1.0; s=scanline; t=scale_scanline; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { pixel=zero; t++; } pixel.red+=span.x*s->red; pixel.green+=span.x*s->green; pixel.blue+=span.x*s->blue; if (image->matte != MagickFalse) pixel.opacity+=span.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=span.x*s->index; t->red=pixel.red; t->green=pixel.green; t->blue=pixel.blue; if (scale_image->matte != MagickFalse) t->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) t->index=pixel.index; scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { pixel=zero; next_column=MagickFalse; t++; } pixel.red+=scale.x*s->red; pixel.green+=scale.x*s->green; pixel.blue+=scale.x*s->blue; if (scale_image->matte != MagickFalse) pixel.opacity+=scale.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=scale.x*s->index; span.x-=scale.x; } s++; } if (span.x > 0) { s--; pixel.red+=span.x*s->red; pixel.green+=span.x*s->green; pixel.blue+=span.x*s->blue; if (scale_image->matte != MagickFalse) pixel.opacity+=span.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=span.x*s->index; } if ((next_column == MagickFalse) && ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns)) { t->red=pixel.red; t->green=pixel.green; t->blue=pixel.blue; if (scale_image->matte != MagickFalse) t->opacity=pixel.opacity; if (scale_indexes != 
(IndexPacket *) NULL) t->index=pixel.index; } /* Transfer scanline to scaled image. */ t=scale_scanline; for (x=0; x < (ssize_t) scale_image->columns; x++) { if (scale_image->matte != MagickFalse) alpha=QuantumScale*(QuantumRange-t->opacity); alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha); SetPixelRed(q,ClampToQuantum(alpha*t->red)); SetPixelGreen(q,ClampToQuantum(alpha*t->green)); SetPixelBlue(q,ClampToQuantum(alpha*t->blue)); if (scale_image->matte != MagickFalse) SetPixelOpacity(q,ClampToQuantum(t->opacity)); if (scale_indexes != (IndexPacket *) NULL) SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index)); t++; q++; } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) break; proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. */ y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector); scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline); x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  size_t
    version;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Modest reductions (area ratio > 0.1) or tiny targets get a direct filtered
    resize; large reductions are pre-sampled to SampleFactor times the target
    size first, which is much cheaper than filtering the full-size image.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        /* deletion invalidates the iterator; restart the scan */
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  /*
    Only emit Thumb::MTime when the stat succeeded; reading attributes
    otherwise is an uninitialized access.  (A stray duplicate of the mtime
    FormatLocaleString that ran unconditionally — and whose result was dead —
    has been removed.)
  */
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  /*
    "Height" capitalized to match Thumb::Image::Width and the freedesktop
    thumbnail-managing standard.
  */
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
gems.c
#include <stdio.h> #include <stdlib.h> /***************************************************************/ #include <omp.h> int main(void) { int np = omp_get_max_threads(); if (np<2) exit(1); /* allocate from the heap with global visibility */ int * A = malloc(np*sizeof(int)); #pragma omp parallel shared(A) { int me = omp_get_thread_num(); int B = 134; /* store local data B at PE 0 into A at PE 1 */ if (me==0) A[1] = B; /* global synchronization of execution and data */ #pragma omp barrier /* observe the result of the store */ if (me==1) printf("A@1=%d\n",A[1]); fflush(stdout); } free(A); return 0; } /***************************************************************/
fft_impl_generic.h
/* Copyright 2017-2019 Michal Zientkiewicz All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef GENFFT_IMPL_GENERIC_H #define GENFFT_IMPL_GENERIC_H #include <cassert> #include "../FFTTwiddle.h" namespace genfft { namespace impl_generic { template <int N, class T> struct FFTGeneric { FFTGeneric<N/2, T> next; template <bool inv> void transform_impl(T *data) { next.template transform_impl<inv>(data); next.template transform_impl<inv>(data+N); #ifdef FFT_OPENMP_SIMD #pragma omp simd #endif for (int i=0; i<N; i+=2) { T wr = twiddle[i]; T wi = twiddle[i+1]; T tempr = inv ? data[i+N]*wr + data[i+N+1]*wi : data[i+N]*wr - data[i+N+1]*wi; T tempi = inv ? 
data[i+N+1]*wr - data[i+N]*wi : data[i+N]*wi + data[i+N+1]*wr; data[i+N] = data[i]-tempr; data[i+N+1] = data[i+1]-tempi; data[i] += tempr; data[i+1] += tempi; } } const Twiddle<N, T> twiddle; }; template <class T> struct FFTGeneric<4, T> { template <bool inv> void transform_impl(T *data) { T tr = data[2]; T ti = data[3]; data[2] = data[0]-tr; data[3] = data[1]-ti; data[0] += tr; data[1] += ti; tr = data[6]; ti = data[7]; data[6] = inv ? ti-data[5] : data[5]-ti; data[7] = inv ? data[4]-tr : tr-data[4]; data[4] += tr; data[5] += ti; tr = data[4]; ti = data[5]; data[4] = data[0]-tr; data[5] = data[1]-ti; data[0] += tr; data[1] += ti; tr = data[6]; ti = data[7]; data[6] = data[2]-tr; data[7] = data[3]-ti; data[2] += tr; data[3] += ti; } }; template <class T> struct FFTGeneric<2, T> { template <bool inv_unused> void transform_impl(T *data) { T tr = data[2]; T ti = data[3]; data[2] = data[0]-tr; data[3] = data[1]-ti; data[0] += tr; data[1] += ti; } }; template <class T> struct FFTGeneric<1, T> { template <bool inv_unused> void transform_impl(T *) {} }; template <int N, class T> struct FFTVertGeneric { FFTVertGeneric<N/2, T> next; template <bool inv> void transform_impl(T *data, stride_t stride, index_t cols) { const stride_t half = (N/2) * stride; index_t next_col = cols; // Process the input in vertical spans, 32 complex numbers wide. // The last span may be wider, up to 48. for (index_t col=0; col<cols; col=next_col) { next_col = cols - col >= 48 ? col + 32 : cols; index_t span_width = next_col - col; T *span_data = data + 2*col; next.template transform_impl<inv>(span_data, stride, span_width); next.template transform_impl<inv>(span_data+half, stride, span_width); for (int i=0; i<N/2; i++) { T wr = twiddle[2*i]; T wi = twiddle[2*i+1]; T *even = span_data + i*stride; T *odd = even + half; #ifdef FFT_OPENMP_SIMD #pragma omp simd #endif for (index_t j=0; j<2*span_width; j+=2) { T tempr, tempi; tempr = inv ? odd[j]*wr + odd[j+1]*wi : odd[j]*wr - odd[j+1]*wi; tempi = inv ? 
odd[j+1]*wr - odd[j]*wi : odd[j+1]*wr + odd[j]*wi; odd[j] = even[j]-tempr; odd[j+1] = even[j+1]-tempi; even[j] += tempr; even[j+1] += tempi; } } } } const Twiddle<N, T> twiddle; }; template <class T> struct FFTVertGeneric<4, T> { template <bool inv> void transform_impl(T *data, stride_t stride, index_t cols) { T *row0 = data; T *row1 = row0+stride; T *row2 = row1+stride; T *row3 = row2+stride; #ifdef FFT_OPENMP_SIMD #pragma omp simd #endif for (index_t j=0; j<cols*2; j+=2) { T tr = row1[j]; T ti = row1[j+1]; row1[j] = row0[j]-tr; row1[j+1] = row0[j+1]-ti; row0[j] += tr; row0[j+1] += ti; tr = row3[j]; ti = row3[j+1]; row3[j] = inv ? ti-row2[j+1] : row2[j+1]-ti; row3[j+1] = inv ? row2[j]-tr : tr-row2[j]; row2[j] += tr; row2[j+1] += ti; tr = row2[j]; ti = row2[j+1]; row2[j] = row0[j]-tr; row2[j+1] = row0[j+1]-ti; row0[j] += tr; row0[j+1] += ti; tr = row3[j]; ti = row3[j+1]; row3[j] = row1[j]-tr; row3[j+1] = row1[j+1]-ti; row1[j] += tr; row1[j+1] += ti; } } }; template <class T> struct FFTVertGeneric<2, T> { template <bool inv_unused> void transform_impl(T* data, stride_t stride, index_t cols) { T* row0 = data; T* row1 = data+stride; for (index_t i=0; i<2*cols; i+=2) { T tr = row1[i]; T ti = row1[i+1]; row1[i ] = row0[i ]-tr; row1[i+1] = row0[i+1]-ti; row0[i ] += tr; row0[i+1] += ti; } } }; template <class T> struct FFTVertGeneric<1, T> { template <bool inv_unused> void transform_impl(T*, int, int) {} }; template <class T> inline std::shared_ptr<impl::FFTBase<T>> GetImpl(int n, T) { switch (n) { #define SELECT_FFT_LEVEL(x) case (1<<x): return impl::FFTLevel<(1<<x), T, FFTGeneric<(1<<x), T>>::GetInstance(); SELECT_FFT_LEVEL(0); SELECT_FFT_LEVEL(1); SELECT_FFT_LEVEL(2); SELECT_FFT_LEVEL(3); SELECT_FFT_LEVEL(4); SELECT_FFT_LEVEL(5); SELECT_FFT_LEVEL(6); SELECT_FFT_LEVEL(7); SELECT_FFT_LEVEL(8); SELECT_FFT_LEVEL(9); SELECT_FFT_LEVEL(10); SELECT_FFT_LEVEL(11); SELECT_FFT_LEVEL(12); SELECT_FFT_LEVEL(13); SELECT_FFT_LEVEL(14); SELECT_FFT_LEVEL(15); SELECT_FFT_LEVEL(16); 
SELECT_FFT_LEVEL(17); SELECT_FFT_LEVEL(18); SELECT_FFT_LEVEL(19); SELECT_FFT_LEVEL(20); SELECT_FFT_LEVEL(21); SELECT_FFT_LEVEL(22); SELECT_FFT_LEVEL(23); #undef SELECT_FFT_LEVEL default: assert(!"unsupported size"); } } template <class T> inline std::shared_ptr<impl::FFTVertBase<T>> GetVertImpl(int n, T) { switch (n) { #define SELECT_FFT_LEVEL(x) case (1<<x): return impl::FFTVertLevel<(1<<x), T, FFTVertGeneric<(1<<x), T>>::GetInstance(); SELECT_FFT_LEVEL(0); SELECT_FFT_LEVEL(1); SELECT_FFT_LEVEL(2); SELECT_FFT_LEVEL(3); SELECT_FFT_LEVEL(4); SELECT_FFT_LEVEL(5); SELECT_FFT_LEVEL(6); SELECT_FFT_LEVEL(7); SELECT_FFT_LEVEL(8); SELECT_FFT_LEVEL(9); SELECT_FFT_LEVEL(10); SELECT_FFT_LEVEL(11); SELECT_FFT_LEVEL(12); SELECT_FFT_LEVEL(13); SELECT_FFT_LEVEL(14); SELECT_FFT_LEVEL(15); SELECT_FFT_LEVEL(16); SELECT_FFT_LEVEL(17); SELECT_FFT_LEVEL(18); SELECT_FFT_LEVEL(19); SELECT_FFT_LEVEL(20); SELECT_FFT_LEVEL(21); SELECT_FFT_LEVEL(22); SELECT_FFT_LEVEL(23); #undef SELECT_FFT_LEVEL default: assert(!"unsupported size"); } } } // impl_generic } // genfft #include "fft_dit_generic.h" #endif /* GENFFT_IMPL_GENERIC_H */
seidel.pluto.par2d.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> double A[N][N+17]; void init_arrays() { int i, j; for (i=0; i<N; i++) for (j=0; j<N; j++) A[i][j] = i*i+j*j; } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); #include <math.h> #include <assert.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define S1(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {A[i][j]=(A[1+i][1+j]+A[1+i][j]+A[1+i][j-1]+A[i][1+j]+A[i][j]+A[i][j-1]+A[i-1][1+j]+A[i-1][j]+A[i-1][j-1])/9;} int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; omp_set_nested(1); omp_set_num_threads(2); /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 5.45s. 
*/ for (c1=-2;c1<=floord(4*T+3*N-10,256);c1++) { lb1=max(max(max(0,ceild(256*c1-2*T-N-251,512)),ceild(256*c1-3*T-2*N+7,256)),ceild(256*c1-N-761,1024)); ub1=min(min(min(floord(256*c1+2*N+505,1024),floord(256*c1+509,512)),floord(64*c1+127,64)),floord(T+N-3,256)); #pragma omp parallel for shared(c1,lb1,ub1) private(lb2,ub2,c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { lb2=max(max(max(max(max(max(ceild(256*c1-256*c2-T+1,256),ceild(512*c1-512*c2-253,768)),0),ceild(512*c2-N-252,256)),ceild(128*c1-256*c2-127,128)),ceild(128*c2-127,128)),ceild(128*c1-127,256)); ub2=min(min(min(min(min(min(floord(256*c1-256*c2+255,256),floord(256*c1-512*c2+N+253,256)),floord(256*c2+T+N+252,256)),floord(T+N-3,128)),floord(256*c1+N+508,512)),floord(256*c1-256*c2+N+253,384)),floord(512*c2+N+507,256)); #pragma omp parallel for shared(c1,c2,lb1,ub1,lb2,ub2) private(c3,c4,c5,c6,c7,c8,c9) for (c3=lb2; c3<=ub2; c3++) { for (c4=max(max(max(max(0,ceild(-256*c2+256*c3-N-284,32)),8*c1-8*c2-8*c3),ceild(256*c2-N-29,32)),ceild(128*c3-N-29,32));c4<=min(min(min(min(8*c1-8*c2-8*c3+7,floord(256*c3+253,64)),floord(T-1,32)),floord(128*c2+127,16)),floord(-128*c2+128*c3+127,16));c4++) { for (c5=max(max(max(max(max(8*c2,ceild(16*c4-15,16)),ceild(256*c3-T-N-28,32)),0),ceild(256*c3-32*c4-N-60,32)),ceild(256*c3-N-59,64));c5<=min(min(min(min(min(floord(32*c4+N+29,32),floord(128*c3+127,16)),8*c2+7),floord(128*c3-16*c4+127,16)),floord(T+N-3,32)),floord(256*c3+N+252,64));c5++) { for (c6=max(max(max(max(max(ceild(64*c4-29,32),8*c3),ceild(16*c5-15,16)),ceild(16*c4+16*c5-15,16)),0),ceild(64*c5-N-28,32));c6<=min(min(min(min(min(8*c3+7,floord(T+N-3,16)),floord(32*c4+32*c5+N+60,32)),floord(32*c4+N+29,16)),floord(64*c5+N+59,32)),floord(32*c5+T+N+28,32));c6++) { for (c7=max(max(max(max(0,32*c4),32*c5-N+2),16*c6-N+2),-32*c5+32*c6-N-29);c7<=min(min(min(min(-32*c5+32*c6+30,floord(32*c6+29,2)),T-1),32*c5+30),32*c4+31);c7++) { /*@ begin Loop( transform UnrollJam(ufactor=8) for 
(c8=max(max(32*c5,c7+1),32*c6-c7-N+2);c8<=min(min(32*c6-c7+30,32*c5+31),c7+N-2);c8++) transform Unroll(ufactor=8) for (c9=max(c7+c8+1,32*c6);c9<=min(32*c6+31,c7+c8+N-2);c9++) { S1(c1-c2-c3,-c1+2*c2+c3,-c1+2*c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9) ; } ) @*/ for (c8=max(max(32*c5,c7+1),32*c6-c7-N+2);c8<=min(min(32*c6-c7+30,32*c5+31),c7+N-2);c8++) { for (c9=max(c7+c8+1,32*c6);c9<=min(32*c6+31,c7+c8+N-2);c9++) { S1(c1-c2-c3,-c1+2*c2+c3,-c1+2*c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9) ; } } /*@ end @*/ } } } } } } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return ((int) A[0][0]); }
main.c
/***************************************************************************
 *
 *            (C) Copyright 2010 The Board of Trustees of the
 *                        University of Illinois
 *                         All Rights Reserved
 *
 ***************************************************************************/

#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "util.h"

/* Fix: guard against a redefinition clash -- <stdint.h> (possibly pulled in
   transitively) already defines UINT8_MAX. */
#ifndef UINT8_MAX
#define UINT8_MAX 255
#endif

/******************************************************************************
* Implementation: Reference
* Details:
* This implementations is a scalar, minimally optimized version. The only
* optimization, which reduces the number of pointer chasing operations is the
* use of a temporary pointer for each row.
******************************************************************************/

/*
 * Reads image dimensions, histogram dimensions and pixel data from the
 * binary input file, builds a saturating (capped at UINT8_MAX) histogram
 * `numIterations` times, and optionally dumps it as an image.
 */
int main(int argc, char* argv[]) {
  struct pb_TimerSet timers;
  struct pb_Parameters *parameters;

  printf("Base implementation of histogramming.\n");
  printf("Maintained by Nady Obeid <obeid1@ece.uiuc.edu>\n");

  parameters = pb_ReadParameters(&argc, argv);
  if (!parameters)
    return -1;

  if (!parameters->inpFiles[0]) {
    fputs("Input file expected\n", stderr);
    return -1;
  }

  int numIterations;
  if (argc >= 2) {
    numIterations = atoi(argv[1]);
  } else {
    fputs("Expected at least one command line argument\n", stderr);
    return -1;
  }

  pb_InitializeTimerSet(&timers);

  char *inputStr = "Input";
  char *outputStr = "Output";

  pb_AddSubTimer(&timers, inputStr, pb_TimerID_IO);
  pb_AddSubTimer(&timers, outputStr, pb_TimerID_IO);
  pb_SwitchToSubTimer(&timers, inputStr, pb_TimerID_IO);

  unsigned int img_width, img_height;
  unsigned int histo_width, histo_height;

  FILE* f = fopen(parameters->inpFiles[0], "rb");
  if (f == NULL) {  /* fix: fopen result was passed to fread unchecked */
    fputs("Error opening input file\n", stderr);
    return -1;
  }
  int result = 0;

  result += fread(&img_width,    sizeof(unsigned int), 1, f);
  result += fread(&img_height,   sizeof(unsigned int), 1, f);
  result += fread(&histo_width,  sizeof(unsigned int), 1, f);
  result += fread(&histo_height, sizeof(unsigned int), 1, f);

  if (result != 4) {
    fputs("Error reading input and output dimensions from file\n", stderr);
    fclose(f);
    return -1;
  }

  unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int));
  unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char));
  if (img == NULL || histo == NULL) {  /* fix: allocations were unchecked */
    fputs("Error allocating image/histogram buffers\n", stderr);
    free(img);
    free(histo);
    fclose(f);
    return -1;
  }

  pb_SwitchToSubTimer(&timers, "Input", pb_TimerID_IO);

  result = fread(img, sizeof(unsigned int), img_width*img_height, f);

  fclose(f);

  if (result != img_width*img_height) {
    fputs("Error reading input array from file\n", stderr);
    return -1;
  }

  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  int iter;
  for (iter = 0; iter < numIterations; iter++) {
    memset(histo, 0, histo_height*histo_width*sizeof(unsigned char));

    unsigned int i;
#pragma omp parallel for
    for (i = 0; i < img_width*img_height; ++i) {
      /* NOTE(review): assumes every pixel value < histo_width*histo_height
         (input file contract) -- no bounds check here; verify generator. */
      const unsigned int value = img[i];
      /* The critical section serializes the saturating increment; this is
         the intended behavior of the "base" reference implementation. */
#pragma omp critical
      if (histo[value] < UINT8_MAX) {
        ++histo[value];
      }
    }
  }

  pb_SwitchToSubTimer(&timers, outputStr, pb_TimerID_IO);

  if (parameters->outFile) {
    dump_histo_img(histo, histo_height, histo_width, parameters->outFile);
  }

  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  free(img);
  free(histo);

  pb_SwitchToTimer(&timers, pb_TimerID_NONE);

  printf("\n");
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);

  return 0;
}
homomorphic_functions.c
long long int sk[NUM_PRIME][4096]; long long int pk0[NUM_PRIME][4096], pk1[NUM_PRIME][4096]; long long int rlk00[NUM_PRIME][4096], rlk01[NUM_PRIME][4096], rlk10[NUM_PRIME][4096], rlk11[NUM_PRIME][4096]; long long int rlk20[NUM_PRIME][4096], rlk21[NUM_PRIME][4096], rlk30[NUM_PRIME][4096], rlk31[NUM_PRIME][4096], rlk40[NUM_PRIME][4096], rlk41[NUM_PRIME][4096]; //mpz_t quotient[THREADS], rem[THREADS]; //mpz_t temp_array64[THREADS]; //mpz_t chunk[THREADS]; //mpz_t temp_array512[THREADS]; void coefficient_mul_q(long long int a[], long long int b[], long long int c[], int prime_index) { int j; if(prime_index==0) { printf("TTTTTTTTTTTTTTTTTTL\n"); printf("%ld %ld\n", a[0], b[0]); } for(j=0; j<4096; j++) { c[j] = mod(a[j] * b[j], prime_index); } if(prime_index==0) { printf("%ld\n", c[0]); } } void coefficient_add_q(long long int a[], long long int b[], long long int c[], int prime_index) { int j; for(j=0; j<4096; j++) { c[j] = mod(a[j] + b[j], prime_index); } } void read_keys() { FILE *fp; int i; static mpz_t big_array[4096]; mpz_array_init(big_array[0], 4096, 256); /////////////////////////////////////////////////////////////////////////// /////////////////////// Public key reading ///////////////////////////// fp = fopen("sage_generated_key/pk0", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, pk0[i], i); } fp = fopen("sage_generated_key/pk1", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, pk1[i], i); } fp = fopen("sage_generated_key/sk", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, sk[i], i); } /* fp = fopen("keys/pk0_0to4095_q0", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[0][i]); fclose(fp); fp = fopen("keys/pk0_0to4095_q1", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[1][i]); fclose(fp); fp = 
fopen("keys/pk0_0to4095_q2", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[2][i]); fclose(fp); fp = fopen("keys/pk0_0to4095_q3", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[3][i]); fclose(fp); fp = fopen("keys/pk0_0to4095_q4", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[4][i]); fclose(fp); fp = fopen("keys/pk0_0to4095_q5", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[5][i]); fclose(fp); fp = fopen("keys/pk0_0to4095_q6", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk0[6][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q0", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[0][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q1", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[1][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q2", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[2][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q3", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[3][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q4", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[4][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q5", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[5][i]); fclose(fp); fp = fopen("keys/pk1_0to4095_q6", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &pk1[6][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q0", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[0][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q1", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[1][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q2", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[2][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q3", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[3][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q4", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[4][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q5", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[5][i]); fclose(fp); fp = fopen("keys/sk_0to4095_q6", "r"); for(i=0; i<4096; i++) fscanf(fp, "%lu", &sk[6][i]); fclose(fp); */ 
/////////////////////////////////////////////////////////////////////////// //////////////// Relinearisation key reading ///////////////////////////// fp = fopen("sage_generated_key/rlk0_0", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk00[i], i); } fp = fopen("sage_generated_key/rlk0_1", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk01[i], i); } fp = fopen("sage_generated_key/rlk1_0", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk10[i], i); } fp = fopen("sage_generated_key/rlk1_1", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk11[i], i); } /* fp = fopen("keys/rlk00_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk00[i], i); } fp = fopen("keys/rlk01_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk01[i], i); } fp = fopen("keys/rlk10_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk10[i], i); } fp = fopen("keys/rlk11_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<7; i++) { compute_mod(big_array, rlk11[i], i); } fp = fopen("keys/rlk20_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk20[i], i); } fp = fopen("keys/rlk21_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk21[i], i); } fp = fopen("keys/rlk30_0to4095", "r"); 
for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk30[i], i); } fp = fopen("keys/rlk31_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk31[i], i); } fp = fopen("keys/rlk40_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk40[i], i); } fp = fopen("keys/rlk41_0to4095", "r"); for(i=0; i<4096; i++) gmp_fscanf(fp, "%Zd", big_array[i]); fclose(fp); for(i=0; i<NUM_PRIME; i++) { compute_mod(big_array, rlk41[i], i); } */ /////////////////////////////////////////////////////////////////////////// ////////////////// Compute FFT of the keys ///////////////////////////// for(i=0; i<NUM_PRIME; i++) { fwd_ntt_q(pk0[i], i); fwd_ntt_q(pk1[i], i); fwd_ntt_q(sk[i], i); fwd_ntt_q(rlk00[i], i); fwd_ntt_q(rlk01[i], i); fwd_ntt_q(rlk10[i], i); fwd_ntt_q(rlk11[i], i); fwd_ntt_q(rlk20[i], i); fwd_ntt_q(rlk21[i], i); fwd_ntt_q(rlk30[i], i); fwd_ntt_q(rlk31[i], i); fwd_ntt_q(rlk40[i], i); fwd_ntt_q(rlk41[i], i); } } void FV_recrypt(long long int c0[][4096], long long int c1[][4096]) { int m[4096]; FV_dec_q(m, c0, c1); FV_enc_q(m, c0, c1); } void FV_enc_q(int m[], long long int c0[][4096], long long int c1[][4096]) { int i, j, r; long long int primrt; long long int m_encoded[4096], e1[4096], e2[4096], u[4096], u_copy[4096], pk0_mul_u[4096], pk1_mul_u[4096], e1_plus_m_encoded[4096]; knuth_yao(e1); knuth_yao(e2); for(i=0; i<4096; i++) { r = rand() % 2; if(rand()%2==1) r = -r; u[i] = r; } for(i=0; i<NUM_PRIME; i++) { for(j=0; j<4096; j++) m_encoded[j] = m[j] * pby_t[i]; poly_copy(u, u_copy); fwd_ntt_q(u_copy, i); //fwd_ntt_q(pk0[i], i); //fwd_ntt_q(pk1[i], i); coefficient_mul_q(pk0[i], u_copy, pk0_mul_u, i); coefficient_mul_q(pk1[i], u_copy, pk1_mul_u, i); // e1_plus_m_encoded <-- m_encoded + e1 inv_ntt_q(pk0_mul_u, i); // pk0_mul_u <-- 
pk0*u inv_ntt_q(pk1_mul_u, i); // pk1_mul_u <-- pk1*u coefficient_add_q(e1, m_encoded, e1_plus_m_encoded, i); // e1_plus_m_encoded <-- m_encoded + e1 coefficient_add_q(pk0_mul_u, e1_plus_m_encoded, c0[i], i); // c0[i] <-- pk0*u + e1 + m_encoded coefficient_add_q(pk1_mul_u, e2, c1[i], i); // c1[i] <-- pk1*u + e2 } } /* void create_crt_rom(mpz_t q[], int length) { int i, j; mpz_t q_full, Ni, Ni_inv, temp; mpz_init(q_full); mpz_init(Ni); mpz_init(Ni_inv); mpz_init(temp); mpz_t mask; mpz_init(mask); mpz_set_str(q_full, "1", 10); mpz_set_str(mask, "262143", 10); for(i=0; i<length; i++) mpz_mul(q_full, q_full, q[i]); for(j=0; j<length; j++) { mpz_fdiv_q(Ni, q_full, q[j]); mpz_invert(Ni_inv, Ni, q[j]); gmp_printf("mux8_18bits rom(18'd%Zd, ", Ni_inv); for(i=0; i<length-1; i++) { mpz_and(temp, Ni, mask); if(i<length-2) gmp_printf("18'd%Zd, ", temp); else gmp_printf("18'd%Zd, 18'd0, 18'd0, address, dataout);\n\n", temp); mpz_sub(Ni, Ni, temp); mpz_fdiv_q_2exp(Ni, Ni, 18); } } } */ void inverse_crt_length7(long long int c0[][4096], mpz_t c0_full[]) { int i, j; int thread_num; mpz_t temp; mpz_init(temp); //#pragma omp parallel for private(thread_num, j) for(i=0; i<4096; i++) { //thread_num = omp_get_thread_num(); for(j=0; j<NUM_PRIME; j++) { mpz_mul_ui(temp, Ni_length7[j], c0[j][i]); mpz_mul(temp, temp, Ni_inv_length7[j]); mpz_mod(temp, temp, p_full_length7); // temp = c0[i][j]*Ni*Ni_inv mod q_full if(j==0) mpz_set(c0_full[i], temp); else mpz_add(c0_full[i], c0_full[i], temp); } mpz_mod(c0_full[i], c0_full[i], p_full_length7); } } void inverse_crt_length15(long long int c0[][4096], mpz_t c0_full[]) { int i, j; int thread_num; mpz_t temp; mpz_init(temp); //#pragma omp parallel for private(thread_num, j) for(i=0; i<4096; i++) { for(j=0; j<NUM_PRIME_EXT; j++) { mpz_mul_ui(temp, Ni_length15[j], c0[j][i]); mpz_mul(temp, temp, Ni_inv_length15[j]); mpz_mod(temp, temp, p_full_length15); // temp = c0[i][j]*Ni*Ni_inv mod q_full if(j==0) mpz_set(c0_full[i], temp); else 
mpz_add(c0_full[i], c0_full[i], temp); } mpz_mod(c0_full[i], c0_full[i], p_full_length15); } } int round_tx(mpz_t a[]) // computes round(t*c/q) { int i; int thread_num; mpz_t quotient, rem; mpz_init(quotient); mpz_init(rem); //#pragma omp parallel for private(thread_num) for(i=4095; i>=0; i--) { //thread_num = omp_get_thread_num(); mpz_mul_ui(a[i], a[i], t); // a[i] <-- a[i]*t if(mpz_cmp_ui(a[i], 0)<0) // a[i] is -ve { mpz_ui_sub(a[i], 0, a[i]); mpz_fdiv_qr(quotient, rem, a[i], p_full_length7); if(mpz_cmp(rem, p_full_length7_by2)>0) mpz_add_ui(quotient, quotient, 1); mpz_ui_sub(a[i], 0, quotient); } else { mpz_fdiv_qr(quotient, rem, a[i], p_full_length7); if(mpz_cmp(rem, p_full_length7_by2)>0) mpz_add_ui(quotient, quotient, 1); //gmp_printf("quo rem p_full_length7_by2 %Zd %Zd %Zd\n", quotient, rem, p_full_length7_by2); mpz_set(a[i], quotient); } } } int round_tx_mod(mpz_t a[]) // computes mod( round(t*c/q), q ) { int i; int thread_num; mpz_t quotient, rem; mpz_init(quotient); mpz_init(rem); //#pragma omp parallel for private(thread_num) for(i=0; i<4096; i++) { //thread_num = omp_get_thread_num(); mpz_mul_ui(a[i], a[i], t); if(mpz_cmp_ui(a[i], 0)<0) // a[i] is -ve { mpz_ui_sub(a[i], 0, a[i]); mpz_fdiv_qr(quotient, rem, a[i], p_full_length7); if(mpz_cmp(rem, p_full_length7_by2)>0) mpz_add_ui(quotient, quotient, 1); mpz_ui_sub(a[i], 0, quotient); mpz_mod(a[i], a[i], p_full_length7); } else { mpz_fdiv_qr(quotient, rem, a[i], p_full_length7); if(mpz_cmp(rem, p_full_length7_by2)>0) mpz_add_ui(quotient, quotient, 1); mpz_set(a[i], quotient); mpz_mod(a[i], a[i], p_full_length7); } } } void FV_dec_q(int m[], long long int c0[][4096], long long int c1[][4096]) { int i; long long int sk_mul_c1[NUM_PRIME][4096]; mpz_t c1_full[4096]; mpz_t temp; mpz_array_init(c1_full[0], 4096, 512); mpz_init(temp); for(i=0; i<NUM_PRIME; i++) { fwd_ntt_q(c1[i], i); coefficient_mul_q(sk[i], c1[i], sk_mul_c1[i], i); inv_ntt_q(sk_mul_c1[i], i); coefficient_add_q(c0[i], sk_mul_c1[i], sk_mul_c1[i], 
i); // sk_mul_c1 <-- c0 + sk_mul_c1 } inverse_crt_length7(sk_mul_c1, c1_full); centerlift(c1_full); round_tx(c1_full); // round t*c/q for(i=4095; i>=0; i--) { //if(mpz_cmp(c1_full[i], p_full_length7_by4)>=0 && mpz_cmp(c1_full[i], p_full_length7_by4_mul3)<0) //m[i]=1; //else //m[i]=0; mpz_mod_ui(temp, c1_full[i], t); // temp = c1_full[i] % t m[i] = mpz_get_ui(temp); } mpz_clear(c1_full[0]); } int FV_add(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096]) { int i; for(i=0; i<NUM_PRIME; i++) { poly_add_q(c10[i], c20[i], c0[i], i); poly_add_q(c11[i], c21[i], c1[i], i); } } int FV_sub(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096]) { int i; for(i=0; i<NUM_PRIME; i++) { poly_sub_q(c10[i], c20[i], c0[i], i); poly_sub_q(c11[i], c21[i], c1[i], i); } } void print_ciphertext_extra_old(long long int c[][4096], char filename[]) { int i, j; FILE *fp; fp = fopen(filename, "w"); for(i=0; i<6; i++) { for(j=0; j<1024; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); //fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu\n", c[i][j], c[i][j+2048]); } for(j=0; j<1024; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); //fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu\n", c[i][j+1024], c[i][j+1024+2048]); } /* for(j=0; j<2048; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu \n", c[i][2*j], c[i][2*j+1]); } */ } fclose(fp); } void print_ciphertext_extra(long long int c[], char filename[]) { int i, j; FILE *fp; fp = fopen(filename, "w"); for(j=0; j<1024; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); //fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu\n", c[j], c[j+2048]); } for(j=0; j<1024; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); //fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu\n", 
c[j+1024], c[j+1024+2048]); } /* for(j=0; j<2048; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu \n", c[i][2*j], c[i][2*j+1]); } */ fclose(fp); } int FV_mul(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096]) { int i, j, index; FILE *fp; long long int c10_QL[NUM_PRIME_EXT][4096], c11_QL[NUM_PRIME_EXT][4096], c20_QL[NUM_PRIME_EXT][4096], c21_QL[NUM_PRIME_EXT][4096], c2[NUM_PRIME_EXT][4096]; long long int c10_mul_c20[NUM_PRIME_EXT][4096], c10_mul_c21[NUM_PRIME_EXT][4096], c11_mul_c20[NUM_PRIME_EXT][4096], c11_mul_c21[NUM_PRIME_EXT][4096]; mpz_t c10_full[4096], c11_full[4096], c20_full[4096], c21_full[4096]; mpz_t c0_full[4096], c1_full[4096], c2_full[4096]; long long int primrt; int num_thread; mpz_array_init(c10_full[0], 4096, 512); mpz_array_init(c11_full[0], 4096, 512); mpz_array_init(c20_full[0], 4096, 512); mpz_array_init(c21_full[0], 4096, 512); mpz_array_init(c0_full[0], 4096, 512); mpz_array_init(c1_full[0], 4096, 512); mpz_array_init(c2_full[0], 4096, 512); inverse_crt_length7(c10, c10_full); inverse_crt_length7(c11, c11_full); inverse_crt_length7(c20, c20_full); inverse_crt_length7(c21, c21_full); centerlift(c10_full); centerlift(c11_full); centerlift(c20_full); centerlift(c21_full); map_to_QL(c10_full, c10_QL); map_to_QL(c11_full, c11_QL); map_to_QL(c20_full, c20_QL); map_to_QL(c21_full, c21_QL); char fname[100]; strcpy(fname, "c0_0_extra"); print_ciphertext_extra(c10_QL[6], fname); int x, y; // input ciphertext address x=200; y=200+12; int C0_address, C1_address, C2_address; // output ciphertext address C0_address = 104; C1_address = 104+13; C2_address = 104+13*2; printf("\nHE MULT\n\n"); //#pragma omp parallel for for(i=0; i<NUM_PRIME_EXT; i++) { fwd_ntt_q(c10_QL[i], i); { // first 6 shares taken from location DDR[x+. ] and the NTTs are stored in DDR[0+.] 
if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",x+j,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t0\n", i); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j,j); } printf("\n---------------------\n"); } // next 7 shares taken from location DDR[6+.] and the NTTs are stored in DDR[6+.] if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t1\n", i); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j,j-NUM_PRIME); } printf("\n---------------------\n"); } } ////// if(i==NUM_PRIME_EXT-1) { strcpy(fname, "c10_QL_NTT"); print_ciphertext_extra(c10_QL[0], fname); } fwd_ntt_q(c11_QL[i], i); { // first 6 shares taken from location DDR[x+. ] and the NTTs are stored in DDR[0+.] if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",x+6+j,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t0\n", i); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+13,j); } printf("\n---------------------\n"); } // next 7 shares taken from location DDR[6+.] and the NTTs are stored in DDR[6+.] 
if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+13,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t1\n", i); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+13,j-NUM_PRIME); } printf("\n---------------------\n"); } } ////// printf("C2 ======\n"); fwd_ntt_q(c20_QL[i], i); { // first 6 shares taken from location DDR[x+. ] and the NTTs are stored in DDR[0+.] if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",y+j,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t0\n", i); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+26,j); } printf("\n---------------------\n"); } // next 7 shares taken from location DDR[6+.] and the NTTs are stored in DDR[6+.] if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+13*2,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t1\n", i); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+13*2,j-NUM_PRIME); } printf("\n---------------------\n"); } } ///// if(i==NUM_PRIME_EXT-1) { strcpy(fname, "c20_QL_NTT"); print_ciphertext_extra(c20_QL[0], fname); } fwd_ntt_q(c21_QL[i], i); { // first 6 shares taken from location DDR[x+. ] and the NTTs are stored in DDR[0+.] 
if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",y+6+j,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t0\n", i); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+13*3,j); } printf("\n---------------------\n"); } // next 7 shares taken from location DDR[6+.] and the NTTs are stored in DDR[6+.] if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+13*3,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n", i); printf("0\t0\t0\t0\t0\t0\n"); printf("17\t0\t0\t0\t0\t1\n", i); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+13*3,j-NUM_PRIME); } printf("\n---------------------\n"); } } // c10_QL -- [0-12] // c11_QL -- [13-25] // c20_QL -- [26-38] // c21_QL -- [39-51] // c10_QL * c20_QL -- [52-64] // c10_QL * c21_QL -- [65-77] // c11_QL * c20_QL -- [78-90] // c11_QL * c21_QL -- [91-103] int iaddress1, iaddress2, oaddress; printf("poly mul c10_QL[i], c20_QL\n"); coefficient_mul_q(c10_QL[i], c20_QL[i], c10_mul_c20[i], i); { iaddress1 = 0; iaddress2 = 26; oaddress = 52; // first 6 multiplications if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j); } } // last 7 multiplications if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; 
j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j-NUM_PRIME); } } } printf("poly mul c10_QL[i], c21_QL\n"); coefficient_mul_q(c10_QL[i], c21_QL[i], c10_mul_c21[i], i); { iaddress1 = 0; iaddress2 = 39; oaddress = 65; // first 6 multiplications if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j); } } // last 7 multiplications if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j-NUM_PRIME); } } } printf("poly mul c11_QL[i], c20_QL\n"); coefficient_mul_q(c11_QL[i], c20_QL[i], c11_mul_c20[i], i); { iaddress1 = 13; iaddress2 = 26; oaddress = 78; // first 6 multiplications if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j); } } // last 7 multiplications if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { 
printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j-NUM_PRIME); } } } printf("poly mul c11_QL[i], c21_QL\n"); coefficient_mul_q(c11_QL[i], c21_QL[i], c11_mul_c21[i], i); { iaddress1 = 13; iaddress2 = 39; oaddress = 91; // first 6 multiplications if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j); } } // last 7 multiplications if(i==0) { for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("19\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+oaddress,j-NUM_PRIME); } } } // c10_QL * c20_QL -- [52-64] // c10_QL * c21_QL -- [65-77] // c11_QL * c20_QL -- [78-90] // c11_QL * c21_QL -- [91-103] printf("INTT c10_mul_c20[i] --> c0\n"); inv_ntt_q(c10_mul_c20[i], i); // c0[i] = c10*c20 mod q[i] poly_copy(c10_mul_c20[i], c0[i]); { iaddress1 = 52; if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+C0_address,j); } for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j-NUM_PRIME); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { 
printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n",j+C0_address,j-NUM_PRIME); } } } //strcpy(fname, "c0_final"); //print_ciphertext_extra(c0[6], fname); printf("c10_mul_c21 + c11_mul_c20 --> c1\n"); coefficient_add_q(c10_mul_c21[i], c11_mul_c20[i], c1[i], i); inv_ntt_q(c1[i], i); // c1[i] = c10*c21 mod q[i] { iaddress1 = 65; iaddress2 = 78; if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("20\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n", j+C1_address, j); } for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t1\t0\n",j+iaddress2,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("20\t0\t0\t0\t0\t1\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n", j+C1_address, j-NUM_PRIME); } } } printf("INTT c11_mul_c21 --> c2\n"); inv_ntt_q(c11_mul_c21[i], i); // c2[i] = c11*c21 mod q[i] poly_copy(c11_mul_c21[i], c2[i]); { iaddress1 = 91; if(i==0) { for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j); } printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t0\n"); for(j=0; j<NUM_PRIME; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n", j+C2_address,j); } for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("4\t%d\t0\t%d\t0\t0\n",j+iaddress1,j-NUM_PRIME); } 
printf("0\t0\t0\t0\t0\t0\n"); printf("16\t0\t0\t0\t0\t0\n"); printf("0\t0\t0\t0\t0\t0\n"); printf("18\t0\t0\t0\t0\t1\n"); for(j=NUM_PRIME; j<NUM_PRIME_EXT; j++) { printf("0\t0\t0\t0\t0\t0\n"); printf("3\t%d\t0\t%d\t0\t0\n", j+C2_address,j-NUM_PRIME); } } } } ///////////////////////////////////////// fp = fopen("c00_mul_c10", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c0[i][j], c0[i][j+2048]); } } fclose(fp); fp = fopen("c01_mul_c11", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c2[i][j], c2[i][j+2048]); } } fclose(fp); fp = fopen("c01_mul_c10_plus", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c1[i][j], c1[i][j+2048]); } } fclose(fp); ///////////////////////////////////////////////////////////////////////////////////// fp = fopen("c00_mul_c10_Q", "w"); for(i=6; i<13; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c0[i][j], c0[i][j+2048]); } } fclose(fp); fp = fopen("c01_mul_c11_Q", "w"); for(i=6; i<13; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c2[i][j], c2[i][j+2048]); } } fclose(fp); fp = fopen("c01_mul_c10_plus_Q", "w"); for(i=6; i<13; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c1[i][j], c1[i][j+2048]); } } fclose(fp); // C0_shares --> [0..5] // C1_shares --> [6..11] // C2_shares --> [12..23] printf("CRT inputs C0\n"); for(i=0; i<13; i++) printf("%d \t", c0[i][0]); printf("\n\n"); printf("CRT inputs C2\n"); for(i=0; i<13; i++) printf("%d \t", c2[i][0]); printf("\n\n"); inverse_crt_length15(c0, c0_full); centerlift_QL(c0_full); round_tx_mod(c0_full); compute_shares(c0_full, c0); { printf("0\t0\t0\t0\t0\t0\n"); printf("6\t%d\t0\t0\t0\t0\n", C0_address); } fp = fopen("C0_SHARES", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c0[i][j], c0[i][j+2048]); } } fclose(fp); inverse_crt_length15(c1, c1_full); centerlift_QL(c1_full); round_tx_mod(c1_full); compute_shares(c1_full, c1); { printf("0\t0\t0\t0\t0\t0\n"); 
printf("6\t%d\t6\t0\t0\t0\n", C1_address); } inverse_crt_length15(c2, c2_full); centerlift_QL(c2_full); round_tx_mod(c2_full); centerlift(c2_full); { printf("0\t0\t0\t0\t0\t0\n"); printf("7\t%d\t12\t0\t0\t0\n", C2_address); } fp = fopen("c0_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { //printf("%lu %lu\n", c[i][2*j], c[i][2*j+1]); //fprintf(fp, "i = %d\n", j); fprintf(fp, "%lu %lu \n", c0[i][2*j], c0[i][2*j+1]); } } fclose(fp); FV_relin(c0, c1, c2_full); mpz_clear(c10_full[0]); mpz_clear(c11_full[0]); mpz_clear(c20_full[0]); mpz_clear(c21_full[0]); mpz_clear(c0_full[0]); mpz_clear(c1_full[0]); mpz_clear(c2_full[0]); } int FV_relin(long long int c0_shares[][4096], long long int c1_shares[][4096], mpz_t c2_full[]) { int i, j; FILE *fp; mpz_t cwd0[4096], cwd1[4096], cwd2[4096], cwd3[4096], cwd4[4096]; mpz_array_init(cwd0[0], 4096, 256); mpz_array_init(cwd1[0], 4096, 256); mpz_array_init(cwd2[0], 4096, 256); mpz_array_init(cwd3[0], 4096, 256); mpz_array_init(cwd4[0], 4096, 256); long long int rlk0_mul_cwd[NUM_PRIME][4096], rlk1_mul_cwd[NUM_PRIME][4096]; long long int cwd0_shares[NUM_PRIME][4096], cwd1_shares[NUM_PRIME][4096], cwd2_shares[NUM_PRIME][4096], cwd3_shares[NUM_PRIME][4096], cwd4_shares[NUM_PRIME][4096]; long long int temp[NUM_PRIME][4096]; word_decomp(c2_full, cwd0, cwd1, cwd2, cwd3, cwd4); compute_shares(cwd0, cwd0_shares); compute_shares(cwd1, cwd1_shares); compute_shares(cwd2, cwd2_shares); compute_shares(cwd3, cwd3_shares); compute_shares(cwd4, cwd4_shares); char fname[100]; /* strcpy(fname, "rlk00_for_HW"); print_ciphertext_extra_old(rlk00, fname); strcpy(fname, "rlk01_for_HW"); print_ciphertext_extra_old(rlk01, fname); strcpy(fname, "rlk10_for_HW"); print_ciphertext_extra_old(rlk10, fname); strcpy(fname, "rlk11_for_HW"); print_ciphertext_extra_old(rlk11, fname); strcpy(fname, "c0_final"); print_ciphertext_extra(cwd0_shares[5], fname); */ fp = fopen("rlk00_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu 
%lu\n", rlk00[i][j], rlk00[i][j+2048]); } } fclose(fp); fp = fopen("rlk01_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", rlk01[i][j], rlk01[i][j+2048]); } } fclose(fp); fp = fopen("rlk10_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", rlk10[i][j], rlk10[i][j+2048]); } } fclose(fp); fp = fopen("rlk11_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", rlk11[i][j], rlk11[i][j+2048]); } } fclose(fp); /* fp = fopen("cwd0_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", cwd0_shares[i][j], cwd0_shares[i][j+2048]); } } fclose(fp); fp = fopen("cwd1_shares", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", cwd1_shares[i][j], cwd1_shares[i][j+2048]); } } fclose(fp); fp = fopen("cwd0", "r"); for(i=0; i<6; i++) { for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &cwd0_shares[i][j], &cwd0_shares[i][j+2048]); } for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &cwd0_shares[i][j+1024], &cwd0_shares[i][j+1024+2048]); } } fclose(fp); fp = fopen("cwd1", "r"); for(i=0; i<6; i++) { for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &cwd1_shares[i][j], &cwd1_shares[i][j+2048]); } for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &cwd1_shares[i][j+1024], &cwd1_shares[i][j+1024+2048]); } } fclose(fp); */ /* fp = fopen("cwd0", "r"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fscanf(fp, "%lu %lu\n", &cwd0_shares[i][j], &cwd0_shares[i][j+2048]); } } fclose(fp); fp = fopen("cwd1", "r"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fscanf(fp, "%lu %lu\n", &cwd1_shares[i][j], &cwd1_shares[i][j+2048]); } } fclose(fp); */ for(i=0; i<NUM_PRIME; i++) { fwd_ntt_q(cwd0_shares[i], i); fwd_ntt_q(cwd1_shares[i], i); } fp = fopen("cwd0_shares_ntt", "w"); for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", cwd0_shares[i][j], cwd0_shares[i][j+2048]); } } fclose(fp); for(i=0; i<NUM_PRIME; i++) { //fwd_ntt_q(cwd0_shares[i], i); 
/* NOTE(review): this span resumes inside FV_relin()'s per-prime loop (the loop
 * header is earlier in the file).  For each prime index i it accumulates the
 * relinearization-key products rlk*WordDecomp(c2) into the c0/c1 shares. */
coefficient_mul_q(rlk00[i], cwd0_shares[i], rlk0_mul_cwd[i], i);
//fwd_ntt_q(cwd1_shares[i], i);
coefficient_mul_q(rlk10[i], cwd1_shares[i], temp[i], i);
coefficient_add_q(rlk0_mul_cwd[i], temp[i], rlk0_mul_cwd[i], i); // rlk0_mul_cwd[i] = rlk00[i]*cwd0_shares[i] + rlk10[i]*cwd1_shares[i]
coefficient_mul_q(rlk01[i], cwd0_shares[i], rlk1_mul_cwd[i], i);
coefficient_mul_q(rlk11[i], cwd1_shares[i], temp[i], i);
coefficient_add_q(rlk1_mul_cwd[i], temp[i], rlk1_mul_cwd[i], i);
/* leave NTT domain before adding into the (coefficient-domain) c0/c1 shares */
inv_ntt_q(rlk0_mul_cwd[i], i);
inv_ntt_q(rlk1_mul_cwd[i], i);
coefficient_add_q(c0_shares[i], rlk0_mul_cwd[i], c0_shares[i], i); // c0_shares[i] = c0_shares[i]+ sum[rlk_i0*cwd_i]
coefficient_add_q(c1_shares[i], rlk1_mul_cwd[i], c1_shares[i], i); // c1_shares[i] = c1_shares[i]+ sum[rlk_i1*cwd_i]
}
/* Debug dumps of relinearization intermediates/results.
 * NOTE(review): "%lu" does not match the `long long int` operands; should be
 * "%lld"/"%llu" (C11 fprintf: mismatched conversion specifier is UB). */
fp = fopen("rlk0_mul_cwd", "w");
for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", rlk0_mul_cwd[i][j], rlk0_mul_cwd[i][j+2048]); } }
fclose(fp);
fp = fopen("cmul0_shares", "w");
for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c0_shares[i][j], c0_shares[i][j+2048]); } }
fclose(fp);
fp = fopen("cmul1_shares", "w");
for(i=0; i<6; i++) { for(j=0; j<2048; j++) { fprintf(fp, "%lu %lu\n", c1_shares[i][j], c1_shares[i][j+2048]); } }
fclose(fp);
/* fp = fopen("c0_shares_new", "r"); for(i=0; i<6; i++) { for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &c0_shares[i][j], &c0_shares[i][j+2048]); } for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &c0_shares[i][j+1024], &c0_shares[i][j+1024+2048]); } } fclose(fp); fp = fopen("c1_shares_new", "r"); for(i=0; i<6; i++) { for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &c1_shares[i][j], &c1_shares[i][j+2048]); } for(j=0; j<1024; j++) { fscanf(fp, "%lu %lu", &c1_shares[i][j+1024], &c1_shares[i][j+1024+2048]); } } fclose(fp); */
/* NOTE(review): cwd0..cwd4 were set up with mpz_array_init, whose storage GMP
 * documents as never individually deallocated -- clearing element [0] here
 * should be re-checked against the GMP manual.  Also: FV_relin is declared
 * `int` but falls off the end without a return value. */
mpz_clear(cwd0[0]);
mpz_clear(cwd1[0]);
mpz_clear(cwd2[0]);
mpz_clear(cwd3[0]);
mpz_clear(cwd4[0]);
}

/* word_decomp: signed word decomposition of a 4096-coefficient mpz vector.
 * Each |c[i]| is split into base-2^91 chunks (mask below is 2^91-1 and the
 * shift is 91 bits); the sign of c[i] is re-applied to every chunk, and each
 * chunk is reduced mod p_full_length7 into the corresponding cwd* output.
 * NOTE(review): the inner loop runs j<2, so only cwd0/cwd1 are produced;
 * cwd2..cwd4 are left untouched -- confirm this is intended for this modulus
 * size.  c[] is destroyed (shifted down to 0) in the process. */
int word_decomp(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
    int i, j;
    int sign;
    mpz_t mask;
    mpz_init(mask);
    mpz_set_str(mask, "2475880078570760549798248447", 10); // mask = 2^91 - 1 (stale comment said 2^32-1)
    /* NOTE(review): names are stale -- these hold 2^91 and 2^90, not 2^32/2^31;
     * both are only referenced from the commented-out balancing code below. */
    mpz_t two_to_32;
    mpz_init(two_to_32);
    mpz_set_str(two_to_32, "2475880078570760549798248448", 10);
    mpz_t two_to_31;
    mpz_init(two_to_31);
    mpz_set_str(two_to_31, "1237940039285380274899124224", 10);
    mpz_t chunk;
    mpz_init(chunk);
    int thread_num;
    //#pragma omp parallel for private(thread_num, sign, j)
    for(i=0; i<4096; i++)
    {
        //thread_num = omp_get_thread_num();
        sign=0;
        /* work on |c[i]|; remember the sign for the chunks */
        if(mpz_cmp_ui(c[i], 0)<0) { sign = 1; mpz_ui_sub(c[i], 0, c[i]); }
        for(j=0; j<2; j++)
        {
            mpz_and(chunk, c[i], mask);
            mpz_sub(c[i], c[i], chunk);
            mpz_fdiv_q_2exp(c[i], c[i], 91); // c[i] = c[i]>>91
            /* if(mpz_cmp(chunk, two_to_31)>0) // if chunk > 2^31 { mpz_sub(chunk, chunk, two_to_32); // chunk = chunk- 2^32 mpz_add_ui(c[i], c[i], 1); } */
            if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
            if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
            if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
            if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7);
            if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7);
            if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7);
        }
    }
}

/* word_decomp_32bit: balanced base-2^32 decomposition into 5 words per
 * coefficient.  Unlike word_decomp above, the balancing step is active:
 * chunks above 2^31 are shifted into [-2^31, 2^31) with a carry into the
 * remaining high part.  Each (signed) chunk is reduced mod p_full_length7
 * into cwd0..cwd4.  c[] is consumed.
 * NOTE(review): declared `int` but has no return statement. */
int word_decomp_32bit(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
    int i, j;
    int sign;
    mpz_t mask;
    mpz_init(mask);
    mpz_set_str(mask, "4294967295", 10); // mask=2^32-1
    mpz_t two_to_32;
    mpz_init(two_to_32);
    mpz_set_str(two_to_32, "4294967296", 10);
    mpz_t two_to_31;
    mpz_init(two_to_31);
    mpz_set_str(two_to_31, "2147483648", 10);
    mpz_t chunk;
    mpz_init(chunk);
    int thread_num;
    //#pragma omp parallel for private(thread_num, sign, j)
    for(i=0; i<4096; i++)
    {
        //thread_num = omp_get_thread_num();
        sign=0;
        if(mpz_cmp_ui(c[i], 0)<0) { sign = 1; mpz_ui_sub(c[i], 0, c[i]); }
        for(j=0; j<5; j++)
        {
            mpz_and(chunk, c[i], mask);
            mpz_sub(c[i], c[i], chunk);
            mpz_fdiv_q_2exp(c[i], c[i], 32); // c[i] = c[i]>>32
            if(mpz_cmp(chunk, two_to_31)>0) // if chunk > 2^31
            {
                mpz_sub(chunk, chunk, two_to_32); // chunk = chunk- 2^32
                mpz_add_ui(c[i], c[i], 1);
            }
            if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
            if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
            if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
            if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7);
            if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7);
            if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7);
        }
    }
}

/* compute_shares: CRT residues -- a_shares[j][i] = a[i] mod p[j] for every
 * coefficient i and every prime j < NUM_PRIME.
 * NOTE(review): `temp` is mpz_init'd but never mpz_clear'd (leak per call). */
void compute_shares(mpz_t a[], long long int a_shares[][4096])
{
    int i, j;
    int thread_num;
    mpz_t temp;
    mpz_init(temp);
    //#pragma omp parallel for private(thread_num, j)
    for(i=0; i<4096; i++)
    {
        //thread_num = omp_get_thread_num();
        for(j=0; j<NUM_PRIME; j++)
        {
            mpz_mod_ui(temp, a[i], p[j]);
            a_shares[j][i] = mpz_get_ui(temp);
        }
    }
}

/* compute_mod: b[i] = a[i] mod p[prime_index] for all 4096 coefficients.
 * NOTE(review): `temp` is never mpz_clear'd (leak per call). */
void compute_mod(mpz_t a[],long long int b[], int prime_index)
{
    int i;
    mpz_t temp;
    mpz_init(temp);
    for(i=0; i<4096; i++)
    {
        mpz_mod_ui(temp, a[i], p[prime_index]);
        b[i] = mpz_get_ui(temp);
    }
}

/* centerlift: map each coefficient from [0, q) to the centered range by
 * subtracting q = p_full_length7 when a[i] > q/2.
 * NOTE(review): declared `int` but returns nothing. */
int centerlift(mpz_t a[])
{
    int i;
    //#pragma omp parallel for
    for(i=0; i<4096; i++)
    {
        if(mpz_cmp(a[i], p_full_length7_by2)>0)
            mpz_sub(a[i], a[i], p_full_length7); // a[i] = a[i]-q
    }
}

/* centerlift_QL: same as centerlift but w.r.t. the extended modulus
 * p_full_length15 (used for the Q*L product ring). */
int centerlift_QL(mpz_t a[])
{
    int i;
    //#pragma omp parallel for
    for(i=0; i<4096; i++)
    {
        if(mpz_cmp(a[i], p_full_length15_by2)>0)
            mpz_sub(a[i], a[i], p_full_length15); // a[i] = a[i]-q
    }
}

/* map_to_QL: reduce each coefficient mod p_full_length15, then emit its CRT
 * residues b[j][i] = a[i] mod p[j] over the extended prime set (NUM_PRIME_EXT).
 * Mutates a[] (the mod reduction is done in place).
 * NOTE(review): `temp` is never mpz_clear'd; declared `int`, no return. */
int map_to_QL(mpz_t a[], long long int b[][4096])
{
    int i, j;
    int thread_num;
    mpz_t temp;
    mpz_init(temp);
    //#pragma omp parallel for private(thread_num, j)
    for(i=0; i<4096; i++)
    {
        mpz_mod(a[i], a[i], p_full_length15);
        for(j=0; j<NUM_PRIME_EXT; j++)
        {
            mpz_mod_ui(temp, a[i], p[j]);
            b[j][i] = mpz_get_ui(temp);
        }
    }
}

/* message_gen: zero the 4096-entry message and put one random bit in m[0].
 * NOTE(review): fm, r1, r2 are unused; random() is not seeded here. */
void message_gen(int m[])
{
    FILE *fm;
    int i, r1, r2;
    for(i=0;i<4096;i++)
    {
        m[i]=0;
    }
    m[0]=random()%2;
}

/* poly_copy: copy all 4096 coefficients from a to b. */
void poly_copy(long long int a[], long long int b[])
{
    int i;
    for(i=0; i<4096; i++)
        b[i] = a[i];
}

/* void message_encrypt(int m, mpz_t c[]) { int message[4096]; int i; for(i=0; i<4096; i++) message[i] = 0; message[0] = m; YASHE_enc(message, c); } */
// ===== begin file: graphProcessing.h (concatenated below; previous C translation unit ends above) =====
/* FINISH TEMPFLATPATH CODE */
#define LP 1
#define PERFDEBUG 0
//#define FULLDEBUG 1
#ifdef _OPENMP
#include <omp.h>
#endif
#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <assert.h>
#include <staticCFG.h>
/**
*@file graphProcessing.h
*Brief Overview of Algorithm:
***********************
*Current Implementation
***********************
*This implementation uses BOOST's graph structure to analyze the paths of the graph
*The path analyzer sends the user paths to be evaluated by the "analyzePath" function that is user defined
**************************
*Further Improvements: TODO
**************************
@todo utilize BOOST visitors to take advantage of the BOOST graph structure's abilities
***************
*Contact Info
***************
*Finally, blame can be assigned to and questions can be forwarded to the author, though response is not guaranteed
*if I'm still at Lawrence
*hoffman34 AT llnl DOT gov
*@author Michael Hoffman
*/
#include <boost/graph/adjacency_list.hpp>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/dominator_tree.hpp>
#include <boost/graph/reverse_graph.hpp>
#include <boost/graph/transpose_graph.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <algorithm>
#include <utility>
#include <iostream>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/time.h>

/* Path enumerator over a BOOST graph: derive from this class and implement
 * analyzePath() to receive each enumerated vertex path. */
template <class CFG>
class SgGraphTraversal
{
public:
    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
    // Entry point: set up and run the path analysis on g; results are
    // delivered through the pure-virtual analyzePath() callback.
    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
    std::vector<int> getInEdges(int& node, CFG*& g);
    std::vector<int> getOutEdges(int& node, CFG*& g);
    int getTarget(int& n, CFG*& g);
    int getSource(int& n, CFG*& g);
    // Bidirectional maps between BOOST vertex/edge descriptors and the dense
    // integer ids used by the internal algorithms.
    std::map<Vertex, int> vertintmap;
    std::map<Edge, int> edgeintmap;
    std::map<int, Vertex> intvertmap;
    std::map<int, Edge> intedgemap;
    SgGraphTraversal();
    virtual ~SgGraphTraversal();
    SgGraphTraversal( SgGraphTraversal &);
    SgGraphTraversal &operator=( SgGraphTraversal &);
    int pathnum;
    void firstPrepGraph(CFG*& g);
private:
    int normals;
    int abnormals;
    bool needssafety;
    int recursed;
    int checkedfound;
    // typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
    // typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
    // std::vector<int> getInEdges(int& node, CFG*& g);
    // std::vector<int> getOutEdges(int& node, CFG*& g);
    void prepareGraph(CFG*& g);
    void findClosuresAndMarkersAndEnumerate(CFG*& g);
    // void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
    // virtual void analyzePath(std::vector<Vertex>& pth) = 0;
    // void firstPrepGraph(CFG*& g);
    int stoppedpaths;
    // Core traversal routines (definitions appear later in this header).
    std::set<std::vector<int> > traversePath(int begin, int end, CFG*& g, bool loop=false);
    std::set<std::vector<int> > uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& localLoops);
    std::vector<std::vector<int> > bfsTraversePath(int begin, int end, CFG*& g, bool loop=false);
    // Path compression helpers: zipPath condenses a path to endpoints plus
    // branch-edge choices; unzipPath reverses it.
    std::vector<int> unzipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath2(std::vector<int>& path, CFG*& g);
    // Debug/visualization output helpers.
    void printCFGNode(int& cf, std::ofstream& o);
    void printCFGNodeGeneric(int& cf, std::string prop, std::ofstream& o);
    void printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o);
    void printHotness(CFG*& g);
    void printPathDot(CFG*& g);
    void computeOrder(CFG*& g, const int& begin);
    void computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential);
    //int getTarget(int& n, CFG*& g);
    //int getSource(int& n, CFG*& g);
    std::vector<int> sources;
    std::vector<int> sinks;
    std::vector<int> recursiveLoops;
    std::vector<int> recurses;
    std::map<int, int> ptsNum;
    bool borrowed;
    std::set<int> badloop;
    std::map<int, std::vector<std::vector<int> > > totalLoops;
    // int pathnum;
    std::map<int, std::string> nodeStrings;
    int sourcenum;
    unsigned long long evaledpaths;
    int badpaths;
    int workingthreadnum;
    bool workingthread;
    std::map<int, std::set<std::vector<int> > > loopStore;
    std::vector<std::vector<int> > pathStore;
    std::map<int, std::vector<int> > subpathglobal;
    std::map<std::vector<int>, int> subpathglobalinv;
    int nextsubpath;
    std::vector<int> orderOfNodes;
    // std::map<Vertex, int> vertintmap;
    // std::map<Edge, int> edgeintmap;
    // std::map<int, Vertex> intvertmap;
    // std::map<int, Edge> intedgemap;
    std::vector<std::map<Vertex, Vertex> > SubGraphGraphMap;
    std::vector<std::map<Vertex, Vertex> > GraphSubGraphMap;
    std::vector<CFG*> subGraphVector;
    void getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath );
    void storeCompact(std::vector<int> path);
    int nextNode;
    int nextEdge;
    std::vector<int> markers;
    std::vector<int> closures;
    std::map<int, int> markerIndex;
    std::map<int, std::vector<int> > pathsAtMarkers;
    typedef typename boost::graph_traits<CFG>::vertex_iterator vertex_iterator;
    typedef typename boost::graph_traits<CFG>::out_edge_iterator out_edge_iterator;
    typedef typename boost::graph_traits<CFG>::in_edge_iterator in_edge_iterator;
    typedef typename boost::graph_traits<CFG>::edge_iterator edge_iterator;
    bool bound;
    // SgGraphTraversal();
    // virtual ~SgGraphTraversal();
    // SgGraphTraversal( SgGraphTraversal &);
    // SgGraphTraversal &operator=( SgGraphTraversal &);
};

// Default constructor: members are left uninitialized here.
template<class CFG>
SgGraphTraversal<CFG>::
SgGraphTraversal()
{
}

// NOTE(review): copy-assignment ignores `other` and copies nothing -- confirm
// this self-return-only behavior is intentional.
template<class CFG>
SgGraphTraversal<CFG> &
SgGraphTraversal<CFG>::
operator=( SgGraphTraversal &other)
{
    return *this;
}

#ifndef SWIG
template<class CFG>
SgGraphTraversal<CFG>::
~SgGraphTraversal()
{
}
#endif

/**
Gets the source of an edge
SgGraphTraversal::getSource
Input:
@param[edge] int& integer representation of edge in question
@param[g] CFG*& the CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getSource(int& edge, CFG*& g)
{
    // Translate the integer edge id to its Boost descriptor, then return
    // the integer id of the vertex the edge leaves from.
    Edge boostEdge = intedgemap[edge];
    Vertex sourceVertex = boost::source(boostEdge, *g);
    return vertintmap[sourceVertex];
}

/**
Gets the target of an edge

SgGraphTraversal::getTarget
Input:
@param[edge] int& integer representation of the edge in question
@param[g] CFG*& the CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getTarget(int& edge, CFG*& g)
{
    // Same lookup as getSource, but for the vertex the edge points at.
    Edge boostEdge = intedgemap[edge];
    Vertex targetVertex = boost::target(boostEdge, *g);
    return vertintmap[targetVertex];
}

/**
Gets in edges with integer inputs, internal use only

SgGraphTraversal::getInEdges
Input:
@param[node] int, integer representation of the node to get the in edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getInEdges(int& node, CFG*& g)
{
    Vertex vert = intvertmap[node];
    std::vector<int> result;
    in_edge_iterator cur, last;
    // Collect the integer id of every edge arriving at this vertex.
    for (boost::tie(cur, last) = boost::in_edges(vert, *g); cur != last; ++cur)
    {
        result.push_back(edgeintmap[*cur]);
    }
    return result;
}

/**
Gets out edges with integer inputs, internal use only

SgGraphTraversal::getOutEdges
Input:
@param[node] int, integer representation of the node to get the out edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getOutEdges(int &node, CFG*& g)
{
    Vertex vert = intvertmap[node];
    std::vector<int> result;
    out_edge_iterator cur, last;
    // Collect the integer id of every edge leaving this vertex.
    for (boost::tie(cur, last) = boost::out_edges(vert, *g); cur != last; ++cur)
    {
        result.push_back(edgeintmap[*cur]);
    }
    return result;
}

/** Condenses paths, currently deprecated...
Input: @param[pth] std::vector<int> the original path @param[g] CFG*, the ambient graph Output: zipped path */ template<class CFG> inline std::vector<int> SgGraphTraversal<CFG>:: zipPath2(std::vector<int>& pth, CFG*& g) { std::vector<int> npth; npth.push_back(pth[0]); for (int i = 1; i < pth.size()-1; i++) { if (find(closures.begin(), closures.end(), pth[i]) != closures.end()) { npth.push_back(pth[i]); } } npth.push_back(pth.back()); return npth; } /** Condenses paths to simply the first and last node and the ordered set of edges taken at nodes with more than 1 outedge Input: @param[pth] std::vector<int>, the original path @param[g] CFG*, the ambient graph @param[start] integer representation of the first node @param[end] integer representation of the last node */ template<class CFG> std::vector<int> SgGraphTraversal<CFG>:: zipPath(std::vector<int>& pth, CFG*& g, int start, int end) { std::vector<int> subpath; std::vector<int> movepath; movepath.push_back(pth.front()); movepath.push_back(pth.back()); for (unsigned int qw = 0; qw < pth.size()-1; qw++) { if (find(markers.begin(), markers.end(), pth[qw]) != markers.end()) { std::vector<int> oeds = getOutEdges(pth[qw], g); for (unsigned int i = 0; i < oeds.size(); i++) { if (getTarget(oeds[i], g) == pth[qw+1]) { movepath.push_back(oeds[i]); } } } } return movepath; } /** unzips the paths zipped by zipPath Input: @param[pzipped] the zipped path @param[CFG] the ambient graph @param[start] the integer representation of the first node (used to check that zipPath is working correctly) @param[end] the integer representation of the end node */ template<class CFG> std::vector<int> SgGraphTraversal<CFG>:: unzipPath(std::vector<int>& pzipped, CFG*& g, int start, int end) { ROSE_ASSERT(pzipped[0] == start && (pzipped[1] == end || end == -1)); std::vector<int> zipped; for (unsigned int i = 2; i < pzipped.size(); i++) { zipped.push_back(pzipped[i]); } std::vector<int> unzipped; unzipped.push_back(start); std::vector<int> oeds = 
getOutEdges(start, g); if (oeds.size() == 0) { return unzipped; } for (unsigned int i = 0; i < zipped.size(); i++) { oeds = getOutEdges(unzipped.back(), g); while (oeds.size() == 1) { if (getTarget(oeds[0], g) == end && unzipped.size() != 1) { unzipped.push_back(end); return unzipped; } unzipped.push_back(getTarget(oeds[0], g)); oeds = getOutEdges(unzipped.back(), g); } if (oeds.size() == 0) { return unzipped; } if (oeds.size() > 1 && (unzipped.back() != end || (unzipped.size() == 1 && unzipped.back() == end))) { ROSE_ASSERT(getSource(zipped[i], g) == unzipped.back()); unzipped.push_back(getTarget(zipped[i], g)); } } std::vector<int> oeds2 = getOutEdges(unzipped.back(), g); if (unzipped.back() != end && oeds2.size() != 0) { while (oeds2.size() == 1 && unzipped.back() != end) { unzipped.push_back(getTarget(oeds2[0], g)); oeds2 = getOutEdges(unzipped.back(), g); } } return unzipped; } /* Example Time Example: timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); do_something_long(); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("%.6lf seconds elapsed\n", t2-t1); */ /** The function responsible for collecting all paths without loops, and all paths within lops that do not include other loops then sending those to uTraverse to assemble them into all paths with any combination of loops Input: @param[begin] integer representation of the first node @param[end] integer representation of the last node (or -1 if its not bounded) @param[g] CFG*, the ambient CFG @param[loop] boolean expressing whether or not we are calculating paths contained within a loop */ template<class CFG> std::vector<std::vector<int> > SgGraphTraversal<CFG>:: bfsTraversePath(int begin, int end, CFG*& g, bool loop) { //perfdebug allows for examining the speed of traversal #ifdef PERFDEBUG timeval tim; gettimeofday(&tim, NULL); double tim1 = tim.tv_sec+(tim.tv_usec/1000000.0); #endif bool recursedloop = loop; std::map<int, 
std::vector<std::vector<int> > > PtP; std::set<int> nodes; std::vector<std::vector<int> > pathContainer; //std::vector<std::vector<int> > oldPaths; std::vector<int> completedLoops; std::vector<std::vector<int> > npc; std::vector<int> bgpath; bgpath.push_back(begin); pathContainer.push_back(bgpath); std::vector<std::vector<int> > newPathContainer; std::vector<std::vector<int> > paths; std::vector<int> localLoops; std::map<int, std::vector<std::vector<int> > > globalLoopPaths; //std::cout << "at the while" << std::endl; //To keep while (pathContainer.size() != 0 /*|| oldPaths.size() != 0*/) { /* unsigned int mpc = 50000; if (pathContainer.size() == 0) { unsigned int mxl = 0; if (oldPaths.size() > mpc) { mxl = mpc/2; } else { mxl = oldPaths.size(); } for (unsigned int k = 0; k < mxl; k++) { pathContainer.push_back(oldPaths.back()); oldPaths.pop_back(); } } if (pathContainer.size() > mpc) { unsigned int j = 0; while (j < mpc) { npc.push_back(pathContainer.back()); pathContainer.pop_back(); j++; } oldPaths.insert(oldPaths.end(), pathContainer.begin(), pathContainer.end()); pathContainer = npc; npc.clear(); } */ //iterating through the currently discovered subpaths to build them up for (unsigned int i = 0; i < pathContainer.size(); i++) { std::vector<int> npth = pathContainer[i]; std::vector<int> oeds = getOutEdges(npth.back(), g); std::vector<int> ieds = getInEdges(npth.back(), g); npth = pathContainer[i]; oeds = getOutEdges(npth.back(), g); if ((!recursedloop && ((bound && npth.back() == end && npth.size() != 1) || (!bound && oeds.size() == 0))) || (recursedloop && npth.back() == end && npth.size() != 1)) { std::vector<int> newpth; newpth = (pathContainer[i]); std::vector<int> movepath = newpth;//zipPath(newpth, g); if (recursedloop && newpth.back() == end && newpth.size() != 1) { paths.push_back(movepath); } else if (!recursedloop) { if (bound && newpth.size() != 1 && newpth.back() == end) { paths.push_back(movepath); } else if (!bound) { paths.push_back(movepath); } 
} } else { std::vector<int> oeds = getOutEdges(pathContainer[i].back(), g); for (unsigned int j = 0; j < oeds.size(); j++) { int tg = getTarget(oeds[j], g); std::vector<int> newpath = (pathContainer[i]); //we split up paths into pieces so that they don't take up a lot of memory, basically this is when we run into a path //more than once, so we attach all paths that go to that path to that particular node via PtP if (nodes.find(tg) != nodes.end() && find(newpath.begin(), newpath.end(), tg) == newpath.end() && tg != end) { if (PtP.find(tg) == PtP.end()) { std::vector<int> nv; nv.push_back(tg); newPathContainer.push_back(nv); PtP[tg].push_back(/*zipPath(*(*/newpath);//, g, newpath.front(), newpath.back())); } else { PtP[tg].push_back(/*zipPath(*/newpath);//, g, newpath.front(), newpath.back())); } } else if (find(newpath.begin(), newpath.end(), getTarget(oeds[j], g)) == newpath.end() || getTarget(oeds[j], g) == end) { newpath.push_back(tg); std::vector<int> ieds = getInEdges(tg, g); if (ieds.size() > 1) {//find(closures.begin(), closures.end(), tg) != closures.end()) { nodes.insert(tg); } newPathContainer.push_back(newpath); } else if (tg == end && recursedloop) { newpath.push_back(tg); newPathContainer.push_back(newpath); } else {//if (find(newpath.begin(), newpath.end(), tg) != newpath.end() && tg != end) { std::vector<int> ieds = getInEdges(tg, g); if (ieds.size() > 1/*find(closures.begin(), closures.end(), tg) != closures.end()*/ && find(completedLoops.begin(), completedLoops.end(), tg) == completedLoops.end() /*&& find(localLoops.begin(), localLoops.end(), tg) == localLoops.end()*/ && find(recurses.begin(), recurses.end(), tg) == recurses.end()) { localLoops.push_back(tg); nodes.insert(tg); } // else if (find(recurses.begin(), recurses.end(), tg) != recurses.end()) { // } } //else { // std::cout << "problem" << std::endl; // ROSE_ASSERT(false); // } } } } pathContainer = newPathContainer; newPathContainer.clear(); } // std::cout << "done while" << std::endl; 
pathContainer.clear(); std::vector<std::vector<int> > finnpts; std::vector<std::vector<int> > npts; while (true) { if (paths.size() > 1000000) { std::cout << "too many paths, consider a subgraph" << std::endl; ROSE_ASSERT(false); } //#pragma omp parallel for schedule(guided) for (unsigned int qq = 0; qq < paths.size(); qq++) { std::vector<int> pq = paths[qq]; std::vector<int> qp; int ppf = paths[qq].front(); if (PtP.find(ppf) != PtP.end()) { for (unsigned int kk = 0; kk < PtP[ppf].size(); kk++) { std::vector<int> newpath = /*unzipPath(*/PtP[ppf][kk];//, g, PtP[ppf][kk][0], PtP[ppf][kk][1]); bool good = true; if (newpath.back() == newpath.front() && newpath.front() != begin && newpath.size() > 1) { good = false; } else { // if (find(pq.begin(), pq.end(), newpath.front()) != pq.end() && newpath.front() != begin) { // good = false; // } // else { for (unsigned int kk1 = 0; kk1 < newpath.size(); kk1++) { /* if (newpath.front() == newpath.back()) { good = false; break; } else */if (find(pq.begin(), pq.end(), newpath[kk1]) != pq.end() && newpath[kk1] != begin) { good = false; break; } } //} } if (good) { newpath.insert(newpath.end(), pq.begin(), pq.end()); #pragma omp critical { npts.push_back(newpath); } } } } else { std::vector<int> ppq = pq;// zipPath(pq, g, pq.front(), pq.back()); #pragma omp critical { finnpts.push_back(ppq); } } } if (npts.size() == 0) { break; } else { paths = npts; npts.clear(); } } paths = finnpts; finnpts.clear(); for (unsigned int k = 0; k < localLoops.size(); k++) { int lk = localLoops[k]; std::vector<std::vector<int> > loopp; if (loopStore.find(localLoops[k]) != loopStore.end()) { loopp.insert(loopp.end(), loopStore[localLoops[k]].begin(), loopStore[localLoops[k]].end()); } else { std::map<int, std::vector<std::vector<int> > > localLoopPaths; completedLoops.push_back(lk); recurses.push_back(lk); loopp = bfsTraversePath(lk, lk, g, true); recurses.pop_back(); } for (unsigned int ik = 0; ik < loopp.size(); ik++) { if 
(find(globalLoopPaths[lk].begin(), globalLoopPaths[lk].end(), loopp[ik]) == globalLoopPaths[lk].end()) { globalLoopPaths[localLoops[k]].push_back(loopp[ik]); } } } borrowed = true; std::vector<std::vector<int> > lps2; unsigned int maxpaths = 1000; unsigned int pathdivisor = 1;//paths.size()/maxpaths;///paths.size(); //if (pathdivisor < 1) { pathdivisor = 1; maxpaths = paths.size(); // } /* for (unsigned int j = 0; j < pathdivisor+1; j++) { std::vector<std::vector<int> > npaths; std::vector<int> dummyvec; unsigned int mxpths; if (j < pathdivisor) { mxpths = maxpaths; } else { mxpths = paths.size() % pathdivisor; } for (unsigned int k = 0; k < mxpths; k++) { npaths.push_back(paths.back());//unzipPath(paths.back(), g, begin, end)); paths.pop_back(); } */ pathStore = paths; paths.clear(); if (!recursedloop) { uTraversePath(begin, end, g, false, globalLoopPaths); } else { recursed++; std::set<std::vector<int> > lps = uTraversePath(begin, end, g, true, globalLoopPaths); recursed--; for (std::set<std::vector<int> >::iterator ij = lps.begin(); ij != lps.end(); ij++) { std::vector<int> ijk = (*ij); lps2.push_back(*ij); } } //} #ifdef PERFDEBUG // timeval tim; //std::cout << "begin: " << begin << " end: " << end << std::endl; gettimeofday(&tim, NULL); double tim2 = tim.tv_sec+(tim.tv_usec/1000000); double timeRet = tim2 - tim1; //std::cout << "bfs time elapsed: " << timeRet << std::endl; #endif return lps2; } /** This function calculates all the permutations of loops on paths it also throws away duplicate paths Input: @param[begin] integer representation of first node @param[end] integer representation of the final node @param[g] ambient CFG @param[globalLoopPaths] connects an integer representation of a node to all possible loops starting at that node */ template<class CFG> std::set<std::vector<int> > SgGraphTraversal<CFG>:: uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& globalLoopPaths) { //std::cout << "uTraverse" << 
std::endl; int doubledpaths = 0; int newmil = 1; //#ifdef LP //if (loop && loopStore.find(begin) != loopStore.end()) { // return loopStore[begin]; //} //#endif #ifdef PERFDEBUG timeval tim; gettimeofday(&tim, NULL); double t1 = tim.tv_sec+(tim.tv_usec/1000000); #endif std::set<std::vector<int> > newpaths; std::set<std::vector<int> > npaths; pathnum = 0; std::vector<int> path; std::vector<std::vector<int> > paths; int truepaths = 0; std::vector<std::vector<int> > checkpaths; std::vector<std::vector<int> > npathchecker; std::map<int, int> currents; //int nnumpaths = 0; std::set<std::vector<int> > loopPaths; //bool threadsafe = true; bool done = false; std::set<std::vector<int> > fts; double ttfors = 0; double tperms = 0; while (true) { //std::cout << "paths.size() " << paths.size() << std::endl; if (paths.size() > 1000000) { std::cout << "nearly 1 million paths with no loops, stopping" << std::endl; return loopPaths; std::cout << "ended early" << std::endl; } if (done || borrowed) { if (borrowed) { paths = pathStore; pathStore.clear(); } //std::cout << "paths.size(): " << paths.size() << std::endl; if (paths.size() != 0) { } else { return loopPaths; } // #pragma omp parallel // { #pragma omp parallel for schedule(guided) for (unsigned int qqq = 0; qqq < paths.size(); qqq++) { // std::cout << "pathcheck" << std::endl; //int pathevals = 0; //std::vector<int> zpt = zipPath2(paths[qqq], g); //std::set<std::vector<int> > boxpaths; std::set<std::vector<int> > movepaths; std::vector<int> path;// = paths[qqq]; path = paths[qqq];//unzipPath(paths[qqq], g, begin, end); truepaths++; int permnums = 1; std::vector<int> perms; std::vector<unsigned int> qs; std::map<int, std::vector<std::vector<int> > > localLoops; std::vector<int> takenLoops; takenLoops.push_back(path[0]); bool taken = false; timeval timfor; int lost = 0; gettimeofday(&timfor, NULL); double t1for = timfor.tv_sec + (timfor.tv_usec/1000000); for (unsigned int q = 1; q < path.size()-1; q++) { //if 
(find(closures.begin(), closures.end(), path[q]) != closures.end()) { if (globalLoopPaths.find(path[q]) != globalLoopPaths.end() /*&& find(lloops.begin(), lloops.end(), path[q]) != lloops.end()*/ && globalLoopPaths[path[q]].size() != 0 /*&& path[q] != begin && path[q] != end*/) { for (unsigned int qp1 = 0; qp1 < globalLoopPaths[path[q]].size(); qp1++) { std::vector<int> gp = globalLoopPaths[path[q]][qp1]; //unzipPath(globalLoopPaths[path[q]][qp1],g,path[q],path[q]); // std::vector<int> zgp = zipPath2(globalLoopPaths[zpt[q]][qp1], g); for (unsigned int qp2 = 0; qp2 < takenLoops.size(); qp2++) { if (find(gp.begin(),gp.end(), takenLoops[qp2]) != gp.end()) { taken = true; } } if (!taken) { localLoops[path[q]].push_back(gp); } else { lost++; taken = false; } } if (localLoops[path[q]].size() != 0) { takenLoops.push_back(path[q]); permnums *= (localLoops[path[q]].size()+1); perms.push_back(permnums); qs.push_back(path[q]); } } } //} //if (loop) { //std::cout << "lostloop: " << lost << std::endl; //} //else { //std::cout << "lostpath: " << lost << std::endl; //} //std::cout << "endpathcheck" << std::endl; //std::cout << "rest" << std::endl; //std::cout << "permnums: " << permnums << std::endl; gettimeofday(&timfor, NULL); double t2for = timfor.tv_sec + (timfor.tv_usec/1000000); //double ttfor = t2for - t1for; //#pragma omp atomic //ttfors += ttfor; //std::set<std::vector<int> > movepaths2; std::set<std::vector<int> > movepathscheck; timeval timperms; gettimeofday(&timperms, NULL); // double t1perm = timperms.tv_sec + (timperms.tv_usec/1000000); std::vector<int> nvec; std::vector<std::vector<int> > boxpaths(permnums, nvec); //#pragma omp parallel for schedule(guided) for (int i = 1; i <= permnums; i++) { //bool goodthread = false; std::vector<int> loopsTaken; //bool stop = false; unsigned int j = 0; std::vector<int> npath; while (true) { if (j == perms.size() || perms[j] > i) { break; } else { j++; } } int pn = i; std::vector<int> pL; for (unsigned int j1 = 0; j1 <= j; 
j1++) { pL.push_back(-1); } for (unsigned int k = j; k > 0; k--) { int l = 1; while (perms[k-1]*l < pn) { l++; } pL[k] = l-2; pn -= (perms[k-1]*(l-1)); } pL[0] = pn-2; unsigned int q2 = 0; for (unsigned int q1 = 0; q1 < path.size(); q1++) { if (q2 < qs.size()) { if (qs.size() != 0 && path[q1] == qs[q2] && q2 != pL.size()) { if (pL[q2] == -1) { npath.push_back(path[q1]); } else { // if (!stop) { npath.insert(npath.end(), localLoops[path[q1]][pL[q2]].begin(), localLoops[path[q1]][pL[q2]].end()); // } } q2++; } else { npath.push_back(path[q1]); } } else { npath.push_back(path[q1]); } } #ifdef FULLDEBUG std::cout << "path: " << std::endl; for (int qe = 0; qe < npath.size(); qe++) { std::cout << ", " << npath[qe]; } std::cout << std::endl; std::cout << "permnum: " << i << std::endl; #endif // bool addit = false; //if (!stop) { // if (loop && npath.front() == npath.back()) { // addit = true; // } // else if (!loop && bound && npath.front() == begin && npath.back() == end && npath.size() != 1) { // addit = true; // } // else if (!loop && !bound) { // addit = true; // } // if (!addit) { // std::cout << "bad path" << std::endl; // } //bool extra = false; //if (addit && !loop) { //if (movepathscheck.find(npath) == movepathscheck.end()) { //int mpc = movepathscheck.size(); //std::set<std::vector<int> > movepathspre = movepathscheck; // movepaths2.insert(npath); //movepathscheck.insert(npath); //ROSE_ASSERT(movepathscheck.size() == mpc || movepathspre.find(npath) == movepathspre.end()); //if (movepathscheck.size() == mpc) { // extra = true; // } //} //else { //#pragma omp atomic // doubledpaths++; // } //} //if (!workingthread || threadsafe) { //if ((newpaths.size() > 1 || i == permnums || threadsafe)) { // } // } // } //if (!extra) // { //if (movepaths2.size() > 0) //|| i == permnums || threadsafe) // #pragma omp critical // { boxpaths[i-1] = npath; // } // } //std::cout << "endrest" << std::endl; } evaledpaths += boxpaths.size(); if (evaledpaths > newmil*100000) { 
//std::cout << "evaledpaths: " << evaledpaths << std::endl; newmil++; } // #pragma omp critical // { if (!loop) { for (std::vector<std::vector<int> >::iterator box = boxpaths.begin(); box != boxpaths.end(); box++) { std::vector<Vertex> verts; getVertexPath((*box), g, verts); #pragma omp critical { analyzePath(verts); } } } else { #pragma omp critical { loopPaths.insert(boxpaths.begin(), boxpaths.end());; } } } } //} /* #pragma omp atomic evaledpaths++; //pathevals++; if (evaledpaths % 10000 == 0 && evaledpaths != 0) { std::cout << "evaled paths: " << evaledpaths << std::endl; } if (!loop) { std::vector<Vertex> verts; getVertexPath(npath, g, verts); #pragma omp critical { #ifdef FULLDEBUG for (unsigned int aa = 0; aa < npath.size(); aa++) { if (ptsNum.find(npath[aa]) != ptsNum.end()) { ptsNum[npath[aa]] += 1; } else { ptsNum[npath[aa]] = 1; } } #endif analyzePath(verts); } } else if (loop) { //std::vector<int> zpth = zipPath(npath, g, npath.front(), npath.back()); #pragma omp critical { loopPaths.insert(npath);//zipPath(npath, g, npath.front(), npath.back())); } } else { } } */ // movepaths2.clear(); // std::cout << "permnums: " << permnums << std::endl; // std::cout << "evaledpaths final: " << pathevals << std::endl; //gettimeofday(&timperms, NULL); //double t2perm = timperms.tv_sec+(timperms.tv_usec/1000000); //#pragma omp atomic //tperms += t2perm - t1perm; // } //} //} //} #ifdef PERFDEBUG gettimeofday(&tim, NULL); // double t2 = tim.tv_sec+(tim.tv_usec/1000000.0); // double tperm = t2 - t1perm //double tX = t2 - t1; //std::cout << "begin: " << begin << " end: " << end << std::endl; // std::cout << "uTraverse time: " << tX << std::endl; // std::cout << "tperms: " << tperms << std::endl; // std::cout << "ttfors: " << ttfors << std::endl; // std::cout << "doubledpaths: " << doubledpaths << std::endl; #endif #ifdef LP if (loop) { #ifdef PERFDEBUG // std::cout << "loopPaths: " << loopPaths.size() << std::endl; #endif loopStore[begin] = loopPaths; } #endif return 
loopPaths; } }

/**
This is the function that is used by the user directly to start the algorithm.
It is immediately available to the user.

Resets all bookkeeping state, prepares the graph (assigning integer ids and
locating markers/closures), then runs a bounded traversal from begin to end,
or -- when unbounded -- one unbounded traversal from every source node.

SgGraphTraversal::constructPathAnalyzer
Input:
@param[g] CFG* g, CFG calculated previously
@param[unbounded] bool, if true begin/end are ignored and all sources are traversed
@param[begin] Vertex, starting node (used only when bounded)
@param[end] Vertex, end node (used only when bounded)
@param[ns] bool, enables the needssafety mode
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
constructPathAnalyzer(CFG* g, bool unbounded, Vertex begin, Vertex end, bool ns)
{
    // Reset all counters and flags so repeated runs start from a clean slate.
    abnormals = 0;
    normals = 0;
    if (ns)
    {
        needssafety = true;
    }
    else
    {
        needssafety = false;
    }
    checkedfound = 0;
    recursed = 0;
    nextsubpath = 0;
    borrowed = true;
    stoppedpaths = 0;
    evaledpaths = 0;
    badpaths = 0;
    sourcenum = 0;
    // Enumerate vertices/edges and find markers (branch points) and
    // closures (merge points).
    prepareGraph(g);
    workingthread = false;
    workingthreadnum = -1;
    //std::cout << "markers: " << markers.size() << std::endl;
    //std::cout << "closures: " << closures.size() << std::endl;
    //std::cout << "sources: " << sources.size() << std::endl;
    //std::cout << "sinks" << sinks.size() << std::endl;
    // printHotness(g);
    bool subgraph = false;
    if (!subgraph)
    {
        if (!unbounded)
        {
            // Bounded traversal: enumerate all paths from begin to end.
            bound = true;
            recursiveLoops.clear();
            recurses.clear();
            std::vector<std::vector<int> > spaths = bfsTraversePath(vertintmap[begin], vertintmap[end], g);
            // std::cout << "spaths: " << spaths.size() << std::endl;
        }
        else
        {
            // Unbounded traversal: run from every source; -1 means "no end node".
            std::set<int> usedsources;
            bound = false;
            std::vector<int> localLps;
            for (unsigned int j = 0; j < sources.size(); j++)
            {
                sourcenum = sources[j];
                recursiveLoops.clear();
                recurses.clear();
                std::vector<std::vector<int> > spaths = bfsTraversePath(sources[j], -1, g);
            }
        }
    }
    //std::cout << "checkedfound: " << checkedfound << std::endl;
    printHotness(g);
}

/**
DEPRECATED
This is a function to construct subgraphs for parallelization

SgGraphTraversal::computeSubGraphs
Input:
@param[begin] const int, starting point
@param[end] const int ending point
@param[g] const CFG*, control flow graph to compute
@param[depthDifferential] int, used to specify how large the subgraph should be
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeSubGraphs(const int&
begin, const int &end, CFG*& g, int depthDifferential) { int minDepth = 0; int maxDepth = minDepth + depthDifferential; int currSubGraph = 0; CFG* subGraph; std::set<int> foundNodes; while (true) { Vertex begin = boost::add_vertex(*subGraphVector[currSubGraph]); GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[minDepth]]] = intvertmap[begin]; SubGraphGraphMap[currSubGraph][intvertmap[begin]] = intvertmap[orderOfNodes[minDepth]]; for (int i = minDepth; i <= maxDepth; i++) { Vertex v = GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[i]]]; std::vector<int> outEdges = getOutEdges(orderOfNodes[i], g); for (unsigned int j = 0; j < outEdges.size(); j++) { Vertex u; if (foundNodes.find(getTarget(outEdges[j], g)) == foundNodes.end()) { u = GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]]; } else { u = boost::add_vertex(*subGraphVector[currSubGraph]); foundNodes.insert(getTarget(outEdges[j], g)); SubGraphGraphMap[currSubGraph][u] = intvertmap[getTarget(outEdges[j], g)]; GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]] = u; } Edge edge; bool ok; boost::tie(edge, ok) = boost::add_edge(v,u,*subGraphVector[currSubGraph]); } } minDepth = maxDepth; if ((unsigned int) minDepth == orderOfNodes.size()-1) { break; } maxDepth += depthDifferential; if ((unsigned int) maxDepth > orderOfNodes.size()-1) { maxDepth = orderOfNodes.size()-1; } CFG* newSubGraph; subGraphVector.push_back(newSubGraph); currSubGraph++; } return; } /* These should NOT be used by the user. 
They are simply for writing interesting information on the DOT graphs of the CFG */ template<class CFG> void SgGraphTraversal<CFG>:: printCFGNodeGeneric(int &cf, std::string prop, std::ofstream& o) { std::string nodeColor = "black"; o << cf << " [label=\"" << " num:" << cf << " prop: " << prop << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; } template<class CFG> void SgGraphTraversal<CFG>:: printCFGNode(int& cf, std::ofstream& o) { #ifdef FULLDEBUG int pts = ptsNum[cf]; std::string nodeColor = "black"; o << cf << " [label=\"" << " pts: " << pts << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; #endif #ifndef FULLDEBUG std::string nodeColor = "black"; o << cf << " [label=\"" << " num:" << cf << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; #endif } template<class CFG> void SgGraphTraversal<CFG>:: printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o) { int src = getSource(cf, cfg); int tar = getTarget(cf, cfg); o << src << " -> " << tar << " [label=\"" << src << " " << tar << "\", style=\"" << "solid" << "\"];\n"; } template<class CFG> void SgGraphTraversal<CFG>:: printHotness(CFG*& g) { const CFG* gc = g; int currhot = 0; std::ofstream mf; std::stringstream filenam; filenam << "hotness" << currhot << ".dot"; currhot++; std::string fn = filenam.str(); mf.open(fn.c_str()); mf << "digraph defaultName { \n"; vertex_iterator v, vend; edge_iterator e, eend; for (tie(v, vend) = vertices(*gc); v != vend; ++v) { printCFGNode(vertintmap[*v], mf); } for (tie(e, eend) = edges(*gc); e != eend; ++e) { printCFGEdge(edgeintmap[*e], g, mf); } mf.close(); } template<class CFG> void SgGraphTraversal<CFG>:: printPathDot(CFG*& g) { const CFG* gc = g; std::ofstream mf; std::stringstream filenam; filenam << "pathnums.dot"; std::string fn = filenam.str(); mf.open(fn.c_str()); mf << "digraph defaultName { \n"; vertex_iterator v, vend; edge_iterator e, eend; for (tie(v, vend) = vertices(*gc); v != vend; ++v) { if 
(nodeStrings.find(vertintmap[*v]) != nodeStrings.end()) { int nn = vertintmap[*v]; printCFGNodeGeneric(vertintmap[*v], nodeStrings[nn], mf); } else { printCFGNodeGeneric(vertintmap[*v], "noprop", mf); } } for (tie(e, eend) = edges(*gc); e != eend; ++e) { printCFGEdge(edgeintmap[*e], g, mf); } mf.close(); } /** This is the function that preps the graph for traversal SgGraphTraversal::prepareGraph Input: @param[g] CFG*& g, CFG calculated previously */ template<class CFG> void SgGraphTraversal<CFG>:: prepareGraph(CFG*& g) { nextNode = 1; nextEdge = 1; findClosuresAndMarkersAndEnumerate(g); } /** DEPRECATED This is the function that preps the graph for traversal, currently this one isn't used but for many traversals on one visitor may necessitate SgGraphTraversal::firstPrepGraph Input: @param[g] CFG*& g, CFG calculated previously */ template<class CFG> void SgGraphTraversal<CFG>:: firstPrepGraph(CFG*& g) { nextNode = 1; nextEdge = 1; findClosuresAndMarkersAndEnumerate(g); } /** This calculates nodes with more than one in edge or more than one out edge SgGraphTraversal::findClosuresAndMarkers Input: @param[g] CFG*& g, CFG calculated previously */ template<class CFG> void SgGraphTraversal<CFG>:: findClosuresAndMarkersAndEnumerate(CFG*& g) { edge_iterator e, eend; for (tie(e, eend) = edges(*g); e != eend; ++e) { intedgemap[nextEdge] = *e; edgeintmap[*e] = nextEdge; nextEdge++; } vertex_iterator v1, vend1; for (tie(v1, vend1) = vertices(*g); v1 != vend1; ++v1) { vertintmap[*v1] = nextNode; intvertmap[nextNode] = *v1; nextNode++; } vertex_iterator v, vend; for (tie(v, vend) = vertices(*g); v != vend; ++v) { std::vector<int> outs = getOutEdges(vertintmap[*v], g); std::vector<int> ins = getInEdges(vertintmap[*v], g); if (outs.size() > 1) { markers.push_back(vertintmap[*v]); markerIndex[vertintmap[*v]] = markers.size()-1; for (unsigned int i = 0; i < outs.size(); i++) { pathsAtMarkers[vertintmap[*v]].push_back(getTarget(outs[i], g)); } } if (ins.size() > 1) { 
closures.push_back(vertintmap[*v]);
        }
        // Nodes with no out-edges are sinks; nodes with no in-edges are sources.
        if (outs.size() == 0)
        {
            sinks.push_back(vertintmap[*v]);
        }
        if (ins.size() == 0)
        {
            sources.push_back(vertintmap[*v]);
        }
    }
    return;
}

/**
DEPRECATED
Currently unused but will be necessary for parallelization in progress

Computes a breadth-first-style ordering of the nodes reachable from begin,
appending it to orderOfNodes.  Merge nodes (more than one in-edge) are held
back until all of their predecessors have been seen.

SgGraphTraversal::computeOrder
@param[g] CFG* cfg in question
@param[begin] const int, integer representation of source node
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeOrder(CFG*& g, const int& begin)
{
    std::vector<int> currentNodes;      // frontier for the current round
    std::vector<int> newCurrentNodes;   // frontier being built for the next round
    currentNodes.push_back(begin);
    std::map<int, int> reverseCurrents; // per-node count of predecessor visits
    orderOfNodes.push_back(begin);
    std::set<int> heldBackNodes;        // merge nodes waiting on more predecessors
    while (currentNodes.size() != 0)
    {
        for (unsigned int j = 0; j < currentNodes.size(); j++)
        {
            std::vector<int> inEdges = getInEdges(currentNodes[j], g);
            if (inEdges.size() > 1)
            {
                if (reverseCurrents.find(currentNodes[j]) == reverseCurrents.end())
                {
                    reverseCurrents[currentNodes[j]] = 0;
                }
                // Last remaining predecessor reached: release the node and
                // expand its successors.
                if ((unsigned int) reverseCurrents[currentNodes[j]] == inEdges.size() - 1)
                {
                    heldBackNodes.erase(currentNodes[j]);
                    reverseCurrents[currentNodes[j]]++;
                    std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                    for (unsigned int k = 0; k < outEdges.size(); k++)
                    {
                        newCurrentNodes.push_back(getTarget(outEdges[k], g));
                        orderOfNodes.push_back(getTarget(outEdges[k], g));
                    }
                }
                // NOTE(review): comparing the visit counter against
                // reverseCurrents.size() (the number of tracked nodes) looks
                // wrong -- inEdges.size() was probably intended, and the
                // comparison mixes signed and unsigned.  Left as-is because
                // this function is deprecated; confirm before reviving it.
                else if (reverseCurrents[currentNodes[j]] < reverseCurrents.size())
                {
                    reverseCurrents[currentNodes[j]]++;
                    if (heldBackNodes.find(currentNodes[j]) == heldBackNodes.end())
                    {
                        heldBackNodes.insert(currentNodes[j]);
                    }
                }
            }
            else
            {
                std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                for (unsigned int k = 0; k < outEdges.size(); k++)
                {
                    newCurrentNodes.push_back(getTarget(outEdges[k], g));
                    orderOfNodes.push_back(getTarget(outEdges[k], g));
                }
            }
        }
        // Deadlock breaker: if nothing new was produced but nodes are still
        // held back, force-expand the held-back nodes.
        if (newCurrentNodes.size() == 0 && heldBackNodes.size() != 0)
        {
            for (std::set<int>::iterator q = heldBackNodes.begin(); q != heldBackNodes.end(); q++)
            {
                int qint = *q;
                std::vector<int> heldBackOutEdges = getOutEdges(qint,
g); for (unsigned int p = 0; p < heldBackOutEdges.size(); p++) { newCurrentNodes.push_back(getTarget(heldBackOutEdges[p], g)); } } heldBackNodes.clear(); } currentNodes = newCurrentNodes; newCurrentNodes.clear(); } return; } /** Converts the path calculated by this algorithm to Vertices so users can access data SgGraphTraversal::getVertexPath @param[path] integer representation of path @param[g] CFG*, cfg in question @param[vertexPath] for some reason this can't be a return value so it is changed via pass by reference */ template<class CFG> void SgGraphTraversal<CFG>:: getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath) { for (unsigned int i = 0; i < path.size(); i++) { vertexPath.push_back(intvertmap[path[i]]); } } /** DEPRECATED Currently unused, may eventually be modified for optimal storage purposes SgGraphTraversal::storeCompact @param[compactPath] path to be compactified */ template<class CFG> void SgGraphTraversal<CFG>:: storeCompact(std::vector<int> compactPath) { return; }
/* ===== file boundary: costmap_creators.h ===== */
/* Copyright (c) 2013, Kai Klindworth All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef COSTMAP_CREATORS_H
#define COSTMAP_CREATORS_H

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "disparity_toolkit/genericfunctions.h"
#include "disparity_toolkit/disparity_range.h"

namespace costmap_creators
{

//! Allocates a 3D cost volume of shape (rows, cols, range), filled with default_value.
template<typename T>
cv::Mat_<T> create_cost_map(const cv::Mat& image, int range, T default_value)
{
	int sz[] = {image.size[0], image.size[1], range};
	return cv::Mat_<T>(3, sz, default_value);
}

//! Convenience overload: derives the disparity dimension from a disparity_range.
template<typename T>
cv::Mat_<T> create_cost_map(const cv::Mat& image, const disparity_range& range, T default_value)
{
	return create_cost_map<T>(image, range.size(), default_value);
}

//! Writes func(y, x, d) into cost_map(y, x, ...) for every disparity d in crange.
//! Assumes the cost volume's disparity axis is contiguous (it is the innermost
//! dimension of the 3D Mat created above).
template<typename T, typename lambda_func>
inline void transform_range(cv::Mat& cost_map, int y, int x, const disparity_range& crange, lambda_func func)
{
	T *result_ptr = cost_map.ptr<T>(y,x, crange.index(crange.start()));
	for(int d = crange.start(); d <= crange.end(); ++d)
	{
		*result_ptr++ = func(y, x, d);
	}
}

namespace sliding_window
{

//! Builds a cost volume using a fixed, square aggregation window.
//! cost_class supplies the per-window incremental cost (see its
//! prepare_row/prepare_window/increm interface); rows run in parallel.
template<typename cost_class>
cv::Mat joint_fixed_size(const cv::Mat& base, const cv::Mat& match, const disparity_range range, const int windowsize)
{
	assert(windowsize > 0);
	using prob_table_type = typename cost_class::result_type;
	// Initialize with a large (but not overflow-prone) sentinel cost.
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, range, std::numeric_limits<prob_table_type>::max()/3);

	const int border = windowsize/2;
	const int y_min = border;
	const int y_max = base.rows - border;
	const int x_min = border;
	const int x_max = base.cols - border;

	cost_class cost_agg(base, match, windowsize);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		cv::Mat windowBase = subwindow(base, x_min, y, windowsize);
		for(int x = x_min; x < x_max; ++x)
		{
			cost_agg.prepare_window(thread_data, windowBase);
			// Clamp the disparity range so the matching window stays in the image.
			const disparity_range crange = range.restrict_to_image(x, base.cols, border);
			transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d){
				return cost_agg.increm(thread_data, x+d);
			});
			// Slide the base window one column to the right.
			windowBase.adjustROI(0,0,-1,1);
		}
	}
	return cost_map;
}

//! Builds a cost volume with a per-pixel window size taken from `windowsizes`
//! (Vec2b = (height, width)); pixels with a zero-sized window keep the
//! default cost of 8.
template<typename cost_class>
cv::Mat joint_flexible_size(const cv::Mat& base, const cv::Mat& match, const disparity_range range, const cv::Mat_<cv::Vec2b>& windowsizes)
{
	int min_windowsize = 7;  // border margin; assumes windows are at least this large
	typedef float prob_table_type;
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, range, 8);

	const int y_min = min_windowsize/2;
	const int y_max = base.rows - min_windowsize/2;
	const int x_min = min_windowsize/2;
	const int x_max = base.cols - min_windowsize/2;

	cost_class cost_agg(base, match, range);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		for(int x = x_min; x < x_max; ++x)
		{
			const cv::Vec2b cwindowsize = windowsizes(y,x);
			if(cwindowsize[0] > 0 && cwindowsize[1] > 0)
			{
				cv::Mat windowBase = subwindow(base, x, y, cwindowsize[1], cwindowsize[0] );
				// NOTE(review): `prepareWindow` here vs. `prepare_window`
				// elsewhere -- the cost_class must provide both spellings, or
				// one of the call sites is stale; verify against cost_class.
				cost_agg.prepareWindow(thread_data, windowBase, cwindowsize[1], cwindowsize[0] );
				const disparity_range crange = range.restrict_to_image(x, base.cols, cwindowsize[1]/2);
				transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d) {
					return cost_agg.increm(thread_data, x, d);
				});
			}
		}
	}
	return cost_map;
}

//! Computes the disparity for subranges: the disparity_range gives the whole
//! bound, while rangeCenter -/+ disparity_delta selects the per-pixel
//! subrange.  (Original comment was in German.)
template<typename cost_class>
cv::Mat flexible_size_flexible_disparityrange(const cv::Mat& base, const cv::Mat& match, const cv::Mat& windowsizes, const cv::Mat& rangeCenter, int disparity_delta, const disparity_range range_bound, unsigned int min_windowsize, unsigned int max_windowsize)
{
	typedef float prob_table_type;
	// Disparity axis holds the subrange only: 2*delta+1 entries.
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, disparity_delta*2+1, 8);

	const int y_min = min_windowsize/2;
	const int y_max = base.rows - min_windowsize/2;
	const int x_min = min_windowsize/2;
	const int x_max = base.cols - min_windowsize/2;

	cost_class cost_agg(base, match, range_bound, max_windowsize);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		for(int x = x_min; x < x_max; ++x)
		{
			cv::Vec2b cwindowsize = windowsizes.at<cv::Vec2b>(y,x);
			// Center the subrange on the precomputed estimate, then clamp it
			// to the global bound and the image border.
			const disparity_range crange = range_bound.subrange_with_subspace(rangeCenter.at<short>(y,x), disparity_delta).restrict_to_image(x, base.cols, cwindowsize[1]/2);

			if(cwindowsize[0] > 0 && cwindowsize[1] > 0 && crange.end() > crange.start())
			{
				cost_agg.prepare_window(thread_data, x, cwindowsize[1], cwindowsize[0] );
				transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d) {
					return cost_agg.increm(thread_data, x, d);
				});
			}
		}
	}
	return cost_map;
}

}

//! Calculates pointwise (no window involved) the disparity.  The cost
//! aggregator must be passed as a function object; returns a cost map.
template<typename cost_class, typename data_type>
cv::Mat calculate_pixelwise(cv::Mat base, data_type data, const disparity_range range)
{
	cv::Mat result = create_cost_map<float>(base, range, std::numeric_limits<float>::max());

	cost_class cost_agg(base, data, range.start());
	#pragma omp parallel for
	for(int y = 0; y< base.size[0]; ++y)
	{
		for(int x = 0; x < base.size[1]; ++x)
		{
			const disparity_range crange = range.restrict_to_image(x, base.size[1]);
			transform_range<float>(result, y, x, crange, [&](int y, int x, int d) {
				return cost_agg(y, x, d);
			});
		}
	}
	return result;
}

/*template<typename data_type>
cv::Mat flexBoxFilter(cv::Mat src, cv::Mat windowsizes)
{
	cv::Mat cost_map(src.size(), CV_32FC1, cv::Scalar(8));
	#pragma omp parallel for default(none) shared(src, cost_map, windowsizes)
	for(int y = 0; y < src.rows; ++y)
	{
		for(int x = 0; x < src.cols; ++x)
		{
			cv::Vec2b cwindowsize = windowsizes.at<cv::Vec2b>(y,x);
			if(cwindowsize[0] > 0 && cwindowsize[1] > 0)
			{
				cv::Mat windowBase = subwindow(src, x, y, cwindowsize[1], cwindowsize[0] );
				cost_map.at<float>(y,x) = cv::sum(windowBase)/windowBase.total();
			}
		}
	}
	return cost_map;
}*/

}

//! Fills a cost volume one disparity at a time: cost_func(d) produces a
//! per-pixel cost image, window_sum aggregates it, and the result is shifted
//! into the volume so that result(y,x,d) refers to base pixel (y,x).
template<typename cost_type, typename window_type>
cv::Mat disparitywise_calculator(cost_type cost_func, window_type window_sum, cv::Size base_size, const disparity_range range)
{
	int sz[] = {base_size.height, base_size.width, range.size()};
	cv::Mat_<float> result = cv::Mat(3, sz, CV_32FC1, cv::Scalar(std::numeric_limits<float>::max()));
	//cv::Mat_<float> result = costmap_creators::sliding_window::create_cost_map(base)

	#pragma omp parallel for
	for(int d = range.start(); d <= range.end(); ++d)
	{
		cv::Mat temp_result = window_sum(cost_func(d), d);

		for(int y = 0; y < base_size.height; ++y)
		{
			if(d < 0)
			{
				// Negative disparity: valid costs start at x = -d.
				for(int x = -d; x < base_size.width; ++x)
					result(y,x,range.index(d)) = temp_result.at<float>(y, x+d);
			}
			else
			{
				// Positive disparity: valid costs end d columns early.
				int max_x = base_size.width - d;
				for(int x = 0; x < max_x; ++x)
					result(y,x,range.index(d)) = temp_result.at<float>(y, x);
			}
		}
	}
	return result;
}

//! disparitywise_calculator specialised to box-filter aggregation.
template<typename cost_type>
cv::Mat simple_window_disparitywise_calculator(cost_type cost_func, cv::Size window_size, cv::Size base_size, const disparity_range range)
{
	auto window_sum = [=](const cv::Mat& pre_result, int){
		cv::Mat temp_result;
		cv::boxFilter(pre_result, temp_result, -1, window_size);
		return temp_result;
	};
	return disparitywise_calculator(cost_func, window_sum, base_size, range);
}

#endif // COSTMAP_CREATORS_H
/* ===== file boundary: raytrace.c ===== */
#ifndef __OPENCL_VERSION__ #include "raytrace.h" #include "affine.h" #include "bbox.h" #include "cx/bittable.h" #include "color.h" #include "lightcut.h" #include "order.h" #include "point.h" #include "simplex.h" #include "space-junk.h" #include "xfrm.h" #include <assert.h> #include <math.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #endif /* #ifndef __OPENCL_VERSION__ */ /* Note: When setting this to false, set MaxKDTreeDepth = 0 in kdtree.c.*/ static const bool KDTreeRayTrace = true; #if NDimensions == 3 static const bool BarycentricRayTrace = false; #else static const bool BarycentricRayTrace = true; #endif #ifndef __OPENCL_VERSION__ //static bool DBogTraceOn = false; #ifdef TrivialMpiRayTrace #include <mpi.h> static void balancer_triv_sync_mpi_RayImage (RayImage* image, uint row_off, uint row_nul, uint nprocs); static void computer_triv_sync_mpi_RayImage (const RayImage* image, uint row_off, uint row_nul, uint myrank, uint nprocs); #endif static void partition_ObjectRaySpace (ObjectRaySpace* space); static void partition_verts_ObjectRaySpace (ObjectRaySpace* space); static void init_Scene_KPTreeGrid (KPTreeGrid* grid, const Scene* scene); static void init_RaySpace_KDTreeGrid (KDTreeGrid* grid, const RaySpace* space); static void fill_pixel (Color* ret_color, uint hitidx, real mag, uint objidx, const RayImage* image, const Point* origin, const Point* dir, const RaySpace* space, Trit front, uint nbounces, URandom* urandom); static void cast_colors (Color* ret_color, const RaySpace* restrict space, const RayImage* restrict image, const Point* restrict origin, const Point* restrict dir, const Color* factor, Trit front, uint nbounces, URandom* urandom); static void cast_row_orthographic (RayImage* restrict image, uint row, const RaySpace* restrict space, const RayCastAPriori* restrict known, URandom* urandom); static void cast_row_perspective (RayImage* image, uint row, const RaySpace* restrict space, const RayCastAPriori* restrict known, URandom* 
urandom);

/* Include all packet tracing stuff here.
 * Keep dependence on this minimal.
 */
#ifdef PackOpsAvail
#include "raytrace-pack.c"
#endif

/* Zero-initialize a RaySpace: empty object/light lists, trivial box. */
    void
init_RaySpace (RaySpace* space)
{
    init_ObjectRaySpace (&space->main);
    space->nobjects = 0;
    space->nlights = 0;
    init_KDTree (&space->object_tree);
    space->partition = true;
    zero_Point (&space->box.min);
    zero_Point (&space->box.max);
    space->skytxtr = 0;
    init_LightCutTree (&space->lightcuts);
}

/* Zero-initialize one object: identity orientation, empty scene/trees. */
    void
init_ObjectRaySpace (ObjectRaySpace* space)
{
    zero_Point (&space->centroid);
    identity_PointXfrm (&space->orientation);
    init_Scene (&space->scene);
    space->nelems = 0;
    init_KDTree (&space->tree);
    init_KPTree (&space->verttree);
    space->visible = true;
}

/* Default light: white point light at the origin, switched on. */
    void
init_PointLightSource (PointLightSource* light)
{
    zero_Point (&light->location);
    set_Color (&light->intensity, 1);
    light->diffuse = false;
    light->hemisphere = false;
    light->on = true;
}

/* Plain structure copy. */
    void
copy_PointLightSource (PointLightSource* dst, const PointLightSource* src)
{
    *dst = *src;
}

/* Build acceleration structures for the main object and all sub-objects. */
    void
init_filled_RaySpace (RaySpace* space)
{
    uint i;
    init_filled_ObjectRaySpace (&space->main);
    UFor( i, space->nobjects )
        init_filled_ObjectRaySpace (&space->objects[i]);
}

/* Recompute the object's bounding box, per-element simplices, and
 * barycentric simplices from its scene data. */
    static void
update_internal_transformed_ObjectRaySpace (ObjectRaySpace* space)
{
    uint ei;
    const Scene* scene;
    scene = &space->scene;
    init_BBox (&space->box, scene->nverts, scene->verts);
    UFor( ei, space->nelems )
    {
        uint pi;
        const SceneElement* elem;
        Simplex* tri;
        elem = &scene->elems[ei];
        tri = &space->elems[ei];
        UFor( pi, NDimensions )
        {
            uint vi;
            vi = elem->verts[pi];
            assert (vi < scene->nverts);
            copy_Point (&tri->pts[pi], &scene->verts[vi]);
        }
    }
    UFor( ei, space->nelems )
    {
        Simplex raw;
        bool good;
        simplex_Scene (&raw, scene, ei);
        good = init_BarySimplex (&space->simplices[ei], &raw);
        /* A degenerate simplex is only logged, not fatal. */
        if (!good)  DBog1( "ei:%u", ei );
        /* assert (good); */
    }
}

/* Allocate element arrays and build full KD/KP partitions. */
    void
init_filled_ObjectRaySpace (ObjectRaySpace* object)
{
    object->nelems = object->scene.nelems;
    AllocTo( object->elems, object->nelems );
    AllocTo( object->simplices, object->nelems );
    update_internal_transformed_ObjectRaySpace (object);
    partition_ObjectRaySpace (object);
    partition_verts_ObjectRaySpace (object);
}

/* Like init_filled_ObjectRaySpace but with a one-leaf (trivial) KD-tree;
 * useful for frequently-updated geometry. */
    void
init_trivial_ObjectRaySpace (ObjectRaySpace* object)
{
    object->nelems = object->scene.nelems;
    AllocTo( object->elems, object->nelems );
    AllocTo( object->simplices, object->nelems );
    update_internal_transformed_ObjectRaySpace (object);
    build_trivial_KDTree (&object->tree, object->nelems, &object->box);
}

/* Refresh geometry and keep the trivial tree's leaf box in sync. */
    void
update_trivial_ObjectRaySpace (ObjectRaySpace* object)
{
    update_internal_transformed_ObjectRaySpace (object);
    object->tree.nodes[0].as.leaf.box = object->box;
}

    void
cleanup_RaySpace (RaySpace* space)
{
    uint i;
    cleanup_ObjectRaySpace (&space->main);
    UFor( i, space->nobjects )
        cleanup_ObjectRaySpace (&space->objects[i]);
    if (space->nobjects > 0)  free (space->objects);
    if (space->nlights > 0)  free (space->lights);
    lose_KDTree (&space->object_tree);
    lose_LightCutTree (&space->lightcuts);
}

    void
cleanup_ObjectRaySpace (ObjectRaySpace* space)
{
    cleanup_Scene (&space->scene);
    lose_KDTree (&space->tree);
    lose_KPTree (&space->verttree);
    if (space->nelems > 0)
    {
        free (space->elems);
        free (space->simplices);
    }
}

    /* Partition the space containing dynamic objects.*/
    void
update_dynamic_RaySpace (RaySpace* space)
{
    if (space->partition && space->nobjects > 0)
    {
        KDTreeGrid grid;
        init_RaySpace_KDTreeGrid (&grid, space);
        space->box = grid.box;
        /* Since it's a regeneration, clean up the previous version.*/
        lose_KDTree (&space->object_tree);
        build_KDTree (&space->object_tree, &grid, 0);
        /* output_KDTreeGrid (stderr, &grid); */
        /* output_KDTree (stderr, &space->object_tree); */
        lose_KDTreeGrid (&grid);
    }
    else
    {
        space->partition = false;
        space->box = space->main.box;
    }
}

/* Build the object's KD-tree over its scene elements. */
    void
partition_ObjectRaySpace (ObjectRaySpace* space)
{
    KDTreeGrid grid;
    init_Scene_KDTreeGrid (&grid, &space->scene, &space->box);
#if 1
    build_KDTree (&space->tree, &grid, space->elems);
#else
    /* Can use this code for development, less time spent building tree.*/
    build_KDTree
        (&space->tree, &grid, 0);
#endif
#if 0
    printf ("nnodes:%u  nelemidcs:%u\n",
            space->tree.nnodes, space->tree.nelemidcs);
#endif
    lose_KDTreeGrid (&grid);
}

/* Build the object's KP-tree over its scene vertices. */
    void
partition_verts_ObjectRaySpace (ObjectRaySpace* space)
{
    KPTreeGrid grid;
    init_Scene_KPTreeGrid (&grid, &space->scene);
    build_KPTree (&space->verttree, &grid);
    lose_KPTreeGrid (&grid);
}

/* Fill a KDTreeGrid with per-element AABB intervals for every scene
 * element; all vertices are asserted to lie inside `box`. */
    void
init_Scene_KDTreeGrid (KDTreeGrid* grid, const Scene* scene,
                       const BBox* box)
{
    uint i, nelems;
    nelems = scene->nelems;
    assert (nelems > 0);
    init_KDTreeGrid( grid, nelems );
    fill_minimal_unique (grid->elemidcs, grid->nelems);
    UFor( i, NDimensions )
        fill_minimal_unique (grid->intls[i], 2*nelems);
    UFor( i, nelems )
    {
        uint pi, dim, ti;
        const SceneElement* elem;
        real coords[NDimensions][2];
        elem = &scene->elems[i];
        /* Start with an inverted interval, then grow over the vertices. */
        UFor( dim, NDimensions )
        {
            coords[dim][0] = Max_real;
            coords[dim][1] = Min_real;
        }
        UFor( pi, NDimensions )
        {
            const Point* p;
            p = &scene->verts[elem->verts[pi]];
            UFor( dim, NDimensions )
            {
                assert (inside_BBox (box, p));
                if (p->coords[dim] < coords[dim][0])
                    coords[dim][0] = p->coords[dim];
                if (p->coords[dim] > coords[dim][1])
                    coords[dim][1] = p->coords[dim];
            }
        }
        ti = 2*i;
        UFor( dim, NDimensions )
        {
            grid->coords[dim][ti] = coords[dim][0];
            grid->coords[dim][ti+1] = coords[dim][1];
        }
    }
    grid->box = *box;
}

/* Fill a KPTreeGrid with one entry per scene vertex. */
    void
init_Scene_KPTreeGrid (KPTreeGrid* grid, const Scene* scene)
{
    init_KPTreeGrid (grid, scene->nverts);
    {:for (i ; scene->nverts)
        set1_KPTreeGrid (grid, i, &scene->verts[i]);
    }
}

/* Fill a KDTreeGrid with one AABB per *visible* object (sub-objects plus
 * the main object last); invisible objects are skipped and the grid is
 * shrunk to the visible count. */
    void
init_RaySpace_KDTreeGrid (KDTreeGrid* grid, const RaySpace* space)
{
    uint i;
    uint nvisible = 0;
    init_KDTreeGrid (grid, 1 + space->nobjects);
    UFor( i, NDimensions )
    {
        fill_minimal_unique (grid->intls[i], 2*grid->nelems);
        grid->box.max.coords[i] = Min_real;
        grid->box.min.coords[i] = Max_real;
    }
    UFor( i, space->nobjects+1 )
    {
        uint dim, ti;
        const ObjectRaySpace* object;
        BBox box;
        /* Index space->nobjects denotes the main object. */
        if (i < space->nobjects)  object = &space->objects[i];
        else                      object = &space->main;
        if (!object->visible)  continue;
        ti = 2 * nvisible;
        if (i < space->nobjects)
trxfrm_BBox (&box, &object->orientation, &object->box,
                         &object->centroid);
        else
            box = object->box;
        include_BBox (&grid->box, &grid->box, &box);
        grid->elemidcs[nvisible] = i;
        UFor( dim, NDimensions )
        {
            real lo, hi;
            lo = box.min.coords[dim];
            hi = box.max.coords[dim];
            grid->coords[dim][ti] = lo;
            grid->coords[dim][ti+1] = hi;
        }
        nvisible += 1;
    }
    shrink_KDTreeGrid (grid, nvisible);
}

/* Default-initialize a RayImage: no buffers allocated, perspective view,
 * ~36.87 degree horizontal FOV, shading and distance coloring enabled. */
    void
init_RayImage (RayImage* image)
{
    uint i;
    image->hits = 0;
    image->mags = 0;
    image->pixels = 0;
    image->nrows = 0;
    image->stride = 0;
    image->ncols = 0;
    /* image->hifov = 60 * M_PI / 180; */
    image->hifov = 2 * atan (1.0 / 3);
    image->perspective = true;
    UFor( i, NColors )
        image->ambient[i] = 0.2;
    image->view_light = 0;
    image->shading_on = true;
    image->color_distance_on = true;
    image->diffuse_camera_on = false;
    image->nbounces_max = 2;
    image->culling_on = false;
}

/* (Re)allocate whichever buffers are non-null, with the row stride
 * aligned up for packet tracing. */
    void
resize_RayImage (RayImage* image)
{
#ifdef PackOpsAvail
    const uint align = RayPacketDimSz;
#else
    const uint align = 4;
#endif
    uint npixels;
    if (image->nrows == 0 || image->ncols == 0)  return;
    image->stride = align * ceil_uint (image->ncols, align);
    npixels = image->nrows * image->stride;
    if (image->hits)  ResizeT( uint, image->hits, npixels );
    if (image->mags)  ResizeT( real, image->mags, npixels );
    if (image->pixels)  ResizeT( byte, image->pixels, 3 * npixels );
}

/* Spread tightly-packed rows (stride == ncols) out to the stride set by a
 * prior resize, moving rows bottom-up so ranges never clobber unread data. */
    void
restride_RayImage (RayImage* image)
{
    uint i;
    /* This should be set to the desired stride,
     * initialized by a resize.
     */
    assert (image->stride >= image->ncols);
    if (image->stride == image->ncols)  return;

    UFor( i, image->nrows )
    {
        uint row, dst_idx, src_idx;
        row = image->nrows - i - 1;
        dst_idx = row * image->stride;
        src_idx = row * image->ncols;

        if (image->hits)
            memmove (&image->hits[dst_idx], &image->hits[src_idx],
                     image->ncols * sizeof (uint));
        if (image->mags)
            memmove (&image->mags[dst_idx], &image->mags[src_idx],
                     image->ncols * sizeof (real));
        if (image->pixels)
            memmove (&image->pixels[3 * dst_idx], &image->pixels[3 * src_idx],
                     image->ncols * 3 * sizeof (byte));
    }
}

    /** Remove the stride from a RayImage to make
     * /image->cols == image->stride/.
     **/
    void
unstride_RayImage (RayImage* image)
{
    uint row;
    if (image->ncols == image->stride)  return;
    UFor( row, image->nrows )
    {
        uint dst_idx, src_idx;
        dst_idx = row * image->ncols;
        src_idx = row * image->stride;
        if (image->hits)
            memmove (&image->hits[dst_idx], &image->hits[src_idx],
                     image->ncols * sizeof (uint));
        if (image->mags)
            memmove (&image->mags[dst_idx], &image->mags[src_idx],
                     image->ncols * sizeof (real));
        if (image->pixels)
            memmove (&image->pixels[3 * dst_idx], &image->pixels[3 * src_idx],
                     image->ncols * 3 * sizeof (byte));
    }
    image->stride = image->ncols;
}

/* Shrink the image by an integer factor `inv`.  Pixels are box-averaged
 * over inv*inv blocks with an error-diffusion line (`o_fracline`) carrying
 * the division remainders; hits/mags are simply subsampled. */
    void
downsample_RayImage (RayImage* image, uint inv)
{
    uint row, i_nrows, i_ncols, o_nrows, o_ncols;
    uint inv2;
    byte* o_pixline;
    byte* o_fracline;

    i_nrows = image->nrows;  i_ncols = image->ncols;
    o_nrows = i_nrows / inv;  o_ncols = i_ncols / inv;
    inv2 = inv * inv;

    AllocTo( o_pixline, 3 * (1 + o_ncols) );
    AllocTo( o_fracline, 3 * (1 + o_ncols) );
    memset (o_pixline, 0, 3 * o_ncols * sizeof(byte));
    memset (o_fracline, 0, 3 * o_ncols * sizeof(byte));

    if (image->pixels)
    {
        UFor( row, i_nrows )
        {
            uint col;
            byte* i_pixline;
            i_pixline = &image->pixels[row * 3 * image->stride];
            UFor( col, i_ncols )
            {
                uint i, o_off;
                o_off = 3 * (col / inv);
                UFor( i, 3 )
                {
                    uint x, y;
                    x = i_pixline[3*col+i];
                    /* Accumulate quotient into the pixel line and keep the
                     * remainder in the fraction line. */
                    y = (x % inv2) + o_fracline[o_off+i];
                    x = (x / inv2) + (y / inv2);
                    y = (y % inv2);
                    o_pixline[o_off+i] += x;
                    o_fracline[o_off+i] = y;
                }
            }
            /* Emit a finished output row every `inv` input rows. */
            if ((row + 1) % inv == 0)
            {
                uint n;
                n = 3 * o_ncols;
                CopyT( byte, &image->pixels[(row/inv) * n], o_pixline, 0, n );
                memset (o_pixline, 0, n * sizeof(byte));
                memset (o_fracline, 0, n * sizeof(byte));
            }
        }
    }

    UFor( row, o_nrows )
    {
        uint col;
        UFor( col, o_ncols )
        {
            uint dst_idx, src_idx;
            dst_idx = row * o_ncols + col;
            src_idx = inv * (row * image->stride + col);
            if (image->hits)  image->hits[dst_idx] = image->hits[src_idx];
            if (image->mags)  image->mags[dst_idx] = image->mags[src_idx];
        }
    }

    free (o_pixline);
    free (o_fracline);
    image->nrows = o_nrows;
    image->ncols = o_ncols;
    resize_RayImage (image);
    image->stride = image->ncols;
}

    void
cleanup_RayImage (RayImage* image)
{
    if (image->hits)  free (image->hits);
    if (image->mags)  free (image->mags);
    if (image->pixels)  free (image->pixels);
}
#endif  /* #ifndef __OPENCL_VERSION__ */

/* Interpolate the per-vertex normals of `elem` at barycentric point
 * `bpoint`.  Result is NOT normalized. */
    void
map_vertex_normal (Point* normal,
                   const Point* vnmls,
                   const SceneElement* elem,
                   const Point* bpoint)
{
    uint i;
    zero_Point (normal);
    UFor( i, NDimensions )
    {
        assert (elem->vnmls[i] != UINT_MAX);
        follow_Point (normal, normal, &vnmls[elem->vnmls[i]],
                      bpoint->coords[i]);
    }
}

/* Transform a world-space ray into the local frame of object `objidx`
 * (index == space->nobjects selects the untransformed main object) and
 * return that object. */
    static const ObjectRaySpace*
ray_to_ObjectRaySpace (Point* ret_origin,
                       Point* ret_dir,
                       const Point* origin,
                       const Point* dir,
                       const RaySpace* space,
                       uint objidx)
{
    const ObjectRaySpace* object;
    Claim2( objidx ,<=, space->nobjects );
    if (objidx < space->nobjects)
    {
        object = &space->objects[objidx];
        ray_to_basis (ret_origin, ret_dir,
                      &object->orientation,
                      origin, dir,
                      &object->centroid);
    }
    else
    {
        object = &space->main;
        *ret_origin = *origin;
        *ret_dir = *dir;
    }
    return object;
}

    /** Assume no solids inside one another.
     * Otherwise, we'd need to track an IOR.
**/
    static void
refraction_ray (Point* dst, const Point* dir, const Point* normal,
                real r, bool entering, real cos_normal)
{
    /* Computes the refracted direction for relative index `r`, given the
     * incident direction, surface normal, and |cos| of the incidence angle.
     * NOTE(review): no check for total internal reflection -- `d` can go
     * negative and sqrt() then yields NaN; confirm callers guarantee this
     * cannot happen. */
    Point a, b;
    real d;
    if (r == 1) {  *dst = *dir;  return;  }
        /*      dir
         * A = -------
         *     ||N*D||
         */
    scale_Point (&a, dir, 1 / cos_normal);
        /* B = A + N */
    summ_Point (&b, &a, normal);
        /*                     1
         * d = ------------------------------------
         *     sqrt(((r1/r2)^2 * ||A||^2) - ||B||^2)
         */
    if (!entering)  r = 1 / r;
    d = r * r * dot_Point (&a, &a) - dot_Point (&b, &b);
    d = 1 / sqrt (d);
        /* dst = d*A + (1-d)(-N) */
    scale_Point (&a, &a, d);
    scale_Point (&b, normal, d-1);
    summ_Point (dst, &a, &b);
    normalize_Point (dst, dst);
}

/* Count the KD-tree splitting planes a ray crosses over distance `mag`;
 * used only by the debug "show splitting planes" visualization. */
    static uint
splitting_plane_count (const Point* origin, const Point* direct, real mag,
                       const KDTree* tree, const BBox* box)
{
    uint count = 0;
    uint node_idx, parent, destin_nodeidx;
    Point destin;
    Point invdirect;
    Ray ray;
    bool inside_box;

    ray.origin = *origin;
    ray.direct = *direct;

    /* Locate the leaf containing the ray's endpoint (UINT_MAX if outside). */
    follow_Ray (&destin, &ray, mag);
    if (inside_BBox (box, &destin))
        destin_nodeidx = find_KDTreeNode (&parent, &destin, tree->nodes);
    else
        destin_nodeidx = UINT_MAX;

#if 0
        /* Return the number of elements in the hit node.*/
    return ((destin_nodeidx == UINT_MAX)
            ? 0
            : tree->nodes[destin_nodeidx].as.leaf.nelems);
#endif

    inside_box = inside_BBox (box, origin);
    reci_Point (&invdirect, direct);
    node_idx = first_KDTreeNode (&parent, &ray, tree->nodes,
                                 box, inside_box);
    if (node_idx != UINT_MAX && inside_box)  count += 1;

    /* Walk leaf-to-leaf until the destination leaf is reached. */
    while (node_idx != UINT_MAX && node_idx != destin_nodeidx)
    {
        count += 1;
        node_idx = next_KDTreeNode (&parent, &ray, &invdirect,
                                    Max_real, node_idx, tree->nodes);
    }
    return count;
}

#ifndef __OPENCL_VERSION__
    /** Get the ambient, diffuse, and specular components
     * of a pixel without considering illumination.
     **/
    static void
pixel_from_Material (Color* ambient, Color* diffuse,
                     Color* specular, Color* emissive,
                     const Material* matl,
                     const BaryPoint* texpoint,
                     const Scene* scene)
{
    if (!matl)
    {
            /* If the cosine between light and normal is 1,
             * and the light intensity is 1,
             * then the resulting color value should be exactly 1.
             * Thus, diffuse + ambient = 1 by default.
             */
        set_Color (ambient, .2);
        set_Color (diffuse, .8);
        set_Color (specular, 0);
        set_Color (emissive, 0);
        return;
    }

    *ambient = matl->ambient;
    *diffuse = matl->diffuse;
    *specular = matl->specular;
    *emissive = matl->emissive;

        /* Texture mapping: blend each component with its texture sample
         * by the texture's alpha. */
    if (texpoint)
    {
        const Texture* tex;
        Color color;
        real alpha;
        if (matl->ambient_texture != UINT_MAX)
        {
            tex = &scene->txtrs[matl->ambient_texture];
            alpha = map_Texture (&color, tex, texpoint);
            mix_Color (ambient, ambient, &color, alpha);
        }
        if (matl->diffuse_texture != UINT_MAX)
        {
            tex = &scene->txtrs[matl->diffuse_texture];
            alpha = map_Texture (&color, tex, texpoint);
            mix_Color (diffuse, diffuse, &color, alpha);
        }
        if (matl->specular_texture != UINT_MAX)
        {
            tex = &scene->txtrs[matl->specular_texture];
            alpha = map_Texture (&color, tex, texpoint);
            mix_Color (specular, specular, &color, alpha);
        }
    }
}

/* Collect interior pixels that missed geometry (hit index >= nelems)
 * while all eight neighbors hit; these are likely numerical holes. */
    void
find_holes (TableT(uint2)* holes, const RayImage* ray_image, uint nelems)
{
    const uint* hits = ray_image->hits;
    const uint stride = ray_image->stride;
    if (ray_image->nrows < 3 || ray_image->ncols < 3)  return;
    if (!hits) {
        DBog0("hits array is null");
        return;
    }
    for (uint row = 1; row < ray_image->nrows - 1; ++row) {
        for (uint col = 1; col < ray_image->ncols - 1; ++col) {
            /* Any neighbor that also missed disqualifies this pixel. */
            if (nelems <= hits[(row-1) * stride + (col-1)])  continue;
            if (nelems <= hits[(row-1) * stride + (col+0)])  continue;
            if (nelems <= hits[(row-1) * stride + (col+1)])  continue;
            if (nelems <= hits[(row+0) * stride + (col-1)])  continue;
            if (nelems <= hits[(row+0) * stride + (col+1)])  continue;
            if (nelems <= hits[(row+1) * stride + (col-1)])  continue;
            if (nelems <= hits[(row+1) * stride + (col+0)])  continue;
            if (nelems <= hits[(row+1) * stride + (col+1)])  continue;
            if (nelems <= hits[(row+0) * stride + (col+0)]) {
                uint2 hole;
                hole.s[0] = row;
                hole.s[1] = col;
                PushTable( *holes, hole );
            }
        }
    }
}

/* Grayscale shading by |cos| of the angle between view ray and normal. */
    void
diffuse_camera_shading (Color* ret_color, const Ray* ray, const Point* normal)
{
    real cos_normal = dot_Point (&ray->direct, normal);
    if (cos_normal < 0)  cos_normal = - cos_normal;
    {:for (dim ; NColors)
        ret_color->coords[dim] = cos_normal;
    }
}

/* Shade one ray hit and ADD the result into *ret_color.  Handles sky
 * misses, distance coloring, texturing, per-light diffuse/specular,
 * lightcuts, refraction and reflection (recursing via cast_colors). */
    void
fill_pixel (Color* ret_color,
            uint hitidx, real mag, uint objidx,
            const RayImage* image,
            const Point* origin,
            const Point* dir,
            const RaySpace* space,
            Trit front,
            uint nbounces,
            URandom* urandom)
{
    /* Debug toggles. */
    const bool shade_by_element = false;
    const bool color_by_element = false;
    const bool compute_bary_coords = true;
    const bool show_splitting_planes = false;
    Color color;
    const BarySimplex* simplex;
    const ObjectRaySpace* object;
    Point rel_origin, rel_dir;
    const Scene* scene;
    const SceneElement* elem;
    const Material* material = 0;
    bool hit_front;
    real cos_normal;
    Point bpoint, normal;
    BaryPoint texpoint;
    uint i;

    if (objidx <= space->nobjects)
    {
        set_Color (&color, 1);
    }
    else if (space->skytxtr < space->main.scene.ntxtrs)
    {
        /* Ray missed all objects: sample the sky texture. */
        map_sky_Texture (&color,
                         &space->main.scene.txtrs[space->skytxtr],
                         dir);
    }
    else
    {
        set_Color (&color, 0);
    }

    if (show_splitting_planes && objidx > space->nobjects)
    {
        const real frac = .1;
        real red;
        uint nplanes;
        nplanes = splitting_plane_count (origin, dir, mag,
#if 1
                                         &space->main.tree, &space->main.box
#else
                                         &space->object_tree, &space->box
#endif
                                        );
        red = 1;
        UFor( i, nplanes )
            red = (1 - frac) * red;
        UFor( i, NColors )
        {
            if (i == 0)
                color.coords[i] = clamp_real (1 - red * (1 - color.coords[i]),
                                              0, 1);
            else
                color.coords[i] = clamp_real (red * color.coords[i],
                                              0, 1);
        }
    }

    if (objidx > space->nobjects)
    {
        /* Miss: accumulate the background color and stop. */
        summ_Color (ret_color, ret_color, &color);
        return;
    }

    object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir,
                                    origin, dir, space, objidx);

    simplex = &object->simplices[hitidx];
    hit_front = (0 >= dot_Point (&rel_dir,
&simplex->plane.normal)); scene = &object->scene; elem = &scene->elems[hitidx]; if (elem->material != UINT_MAX) material = &scene->matls[elem->material]; if (image->color_distance_on && mag < image->view_light) { real val; /* Distance color scale.*/ zero_Color (&color); val = 2 * NColors * mag / image->view_light; UFor( i, 2*NColors ) { if (val < i+1) { uint idx1, idx2; idx1 = i / 2; idx2 = (idx1 + 1) % NColors; if (even_uint (i)) { color.coords[idx1] = 1; color.coords[idx2] = val - i; } else { color.coords[idx1] = i+1 - val; color.coords[idx2] = 1; } break; } } } if (compute_bary_coords) { Point rel_isect; follow_Point (&rel_isect, &rel_origin, &rel_dir, mag); barycentric_Point (&bpoint, &rel_isect, simplex); if (elem->txpts[0] < UINT_MAX) { Op_s( real, NDimensions-1, texpoint.coords , 0 ); UFor( i, NDimensions ) { assert (elem->txpts[i] < UINT_MAX); Op_2020s( real, NDimensions-1, texpoint.coords ,+, texpoint.coords , *, scene->txpts[elem->txpts[i]].coords , bpoint.coords[i] ); } } } /* Get the normal.*/ if (material && material->bump_texture < UINT_MAX) map_bump_Texture (&normal, &scene->txtrs[material->bump_texture], &texpoint); else if (compute_bary_coords && 0 < scene->nvnmls) map_vertex_normal (&normal, scene->vnmls, elem, &bpoint); else copy_Point (&normal, &simplex->plane.normal); /* Rotate it when necessary.*/ if (objidx != space->nobjects) trxfrm_Point (&normal, &object->orientation, &normal); /* The above cases do not give a normalized vector!*/ normalize_Point (&normal, &normal); if (image->diffuse_camera_on) { Ray ray; ray.origin = *origin; ray.direct = *dir; diffuse_camera_shading (ret_color, &ray, &normal); return; } /* Assure the normal is in our direction.*/ cos_normal = dot_Point (dir, &normal); if (hit_front) cos_normal = - cos_normal; else negate_Point (&normal, &normal); if (color_by_element) { uint color_diff, x, y; const uint nincs = 256; assert (NColors == 3); color_diff = 0xFFFFFF / scene->nelems; x = color_diff * (scene->nelems - hitidx); 
y = 0; UFor( i, 3 ) { uint j; UFor( j, 8 ) { if (0 != (x & (1 << (i + 3*j)))) y |= (1 << (8*i + j)); } } color.coords[0] *= (real) ((y & 0xFF0000) >> 16) / (nincs-1); color.coords[1] *= (real) ((y & 0x00FF00) >> 8) / (nincs-1); color.coords[2] *= (real) ((y & 0x0000FF) >> 0) / (nincs-1); } if (shade_by_element) { real scale = 0; scale = 1 - (real) hitidx / scene->nelems; scale_Color (&color, &color, scale); } else if (image->shading_on) { Point isect, refldir; Color dscale, sscale; Color ambient, diffuse, specular, emissive; pixel_from_Material (&ambient, &diffuse, &specular, &emissive, material, (compute_bary_coords ? &texpoint : 0), scene); zero_Color (&dscale); zero_Color (&sscale); follow_Point (&refldir, dir, &normal, 2 * cos_normal); follow_Point (&isect, origin, dir, mag); {:for (light_idx ; space->nlights) real tscale, magtolight; Point tolight; const PointLightSource* const light = &space->lights[light_idx]; if (!light->on) continue; diff_Point (&tolight, &light->location, &isect); magtolight = magnitude_Point (&tolight); scale_Point (&tolight, &tolight, 1 / magtolight); tscale = dot_Point (&tolight, &normal); if (tscale > 0) { Ray tolight_ray; if (light->hemisphere) { real dot = - dot_Point (&tolight, &light->direct); if (dot <= 0) continue; tscale *= dot; } tolight_ray.origin = isect; tolight_ray.direct = tolight; { const real dot = dot_Point (&simplex->plane.normal, &tolight_ray.direct); if (dot < 0) { const real adjust = 1e2*Epsilon_real * taximag_Point (&isect); if (true || dot < 10 * (M_PI/180)) continue; magtolight -= adjust; follow_Point (&tolight_ray.origin, &tolight_ray.origin, &tolight_ray.direct, adjust); } } if (cast_to_light (space, &tolight_ray, hit_front ? 
Yes : Nil, magtolight)) { real dist_factor = 1; tscale *= dist_factor; /* Add diffuse portion.*/ follow_Color (&dscale, &dscale, &light->intensity, tscale); /* Specular */ if (!light->diffuse && material) { real dot; dot = dot_Point (&refldir, &tolight); if (dot > 0) { tscale = pow (dot, material->shininess); Op_20s( real, NColors, sscale.coords ,+, sscale.coords , tscale ); } } } } } UFor( i, NColors ) color.coords[i] *= ambient.coords[i] + diffuse.coords[i] * dscale.coords[i] + specular.coords[i] * sscale.coords[i] + emissive.coords[i]; if (space->lightcuts.nodes.sz > 0) { Color tmp; RayHit hit; hit.isect = isect; negate_Point (&hit.incid, dir); hit.normal = normal; hit.front = front; hit.mag = mag; cast_LightCutTree (&tmp, &space->lightcuts, &diffuse, &hit, space, urandom); summ_Color (&color, &color, &tmp); } if (material && material->opacity < 1) { Point tmp_dir; Color factor; scale_Color (&color, &color, material->opacity); scale_Color (&factor, &material->transmission, 1 - material->opacity); refraction_ray (&tmp_dir, dir, &normal, material->optical_density, hit_front, cos_normal); cast_colors (&color, space, image, &isect, &tmp_dir, &factor, front == Nil ? 
Yes : Nil, nbounces, urandom); }

        /* Mirror reflection: recurse along the reflected direction,
         * filtered by the material's specular color (scaled by opacity).
         */
    if (material && material->reflective)
    {
        Color factor;
        scale_Color (&factor, &material->specular, material->opacity);
        cast_colors (&color, space, image, &isect, &refldir, &factor,
                     front, nbounces, urandom);
    }
    }
    summ_Color (ret_color, ret_color, &color);
}
#endif  /* #ifndef __OPENCL_VERSION__ */

    /** Test a ray against a list of element indices, keeping the nearest hit.
     *
     * \param ret_hit    In/out: index of the closest element hit so far.
     * \param ret_mag    In/out: ray magnitude (distance) of that hit.
     * \param nelemidcs  Number of entries in \a elemidcs.
     * \param elemidcs   Indices into \a simplices / \a tris to test.
     * \param front      Which facing (front/back/either) counts as a hit.
     *
     * NOTE(review): `{:for (...)` is this project's loop macro syntax
     * (expanded by a custom preprocessor) -- left untouched.
     */
static void
test_intersections (uint* restrict ret_hit,
                    real* restrict ret_mag,
                    const Ray* restrict ray,
                    uint nelemidcs,
                    __global const uint* restrict elemidcs,
                    __global const BarySimplex* restrict simplices,
                    __global const Simplex* restrict tris,
                    Trit front)
{
    uint hit_idx = *ret_hit;
    real hit_mag = *ret_mag;
    {:for ( i ; nelemidcs )
        bool didhit;
        uint tmp_hit = elemidcs[i];
        real tmp_mag;
            /* Two interchangeable intersection routines; the flag picks the
             * barycentric-plane test over the plain simplex test.
             */
        if (BarycentricRayTrace)
        {
            const BarySimplex simplex = simplices[tmp_hit];
            didhit = hit_BarySimplex (&tmp_mag, ray, &simplex, front);
        }
        else
        {
            didhit = hit_Simplex (&tmp_mag, *ray, tris[tmp_hit], front);
        }
        //if (DBogTraceOn)
        //    DBog2( "Test:%u hit?:%s", tmp_hit, didhit ? "yes" : "no" );
            /* Keep only strictly closer hits. */
        if (didhit && tmp_mag < hit_mag)
        {
            hit_idx = tmp_hit;
            hit_mag = tmp_mag;
        }
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
}

    /** Cast a ray into one object's geometry.
     *
     * With KDTreeRayTrace enabled, walks the KD-tree and tests only the
     * leaves the ray passes through; otherwise brute-forces all \a nelems
     * elements.  *ret_hit / *ret_mag are read as the current best hit and
     * updated in place.
     */
void
cast_Ray (uint* restrict ret_hit,
          real* restrict ret_mag,
          const Ray* restrict ray,
          const uint nelems,
          __global const uint* restrict elemidcs,
          __global const KDTreeNode* restrict nodes,
          __global const BarySimplex* restrict simplices,
          __global const Simplex* restrict tris,
          const BBox* restrict box,
          bool inside_box,
          Trit front)
{
    Point invdirect;
    uint node_idx, parent = 0;
    uint hit_idx = *ret_hit;
    real hit_mag = *ret_mag;

    if (!KDTreeRayTrace)
    {
            /* Brute force: test every element directly. */
        test_intersections (&hit_idx, &hit_mag, ray, nelems,
                            elemidcs, simplices, tris, front);
        *ret_hit = hit_idx;
        *ret_mag = hit_mag;
        return;
    }

        /* Reciprocal of the direction is precomputed for the traversal
         * (presumably for slab/plane tests -- see next_KDTreeNode).
         */
    reci_Point (&invdirect, &ray->direct);
    {
        const BBox tmp_box = *box;
        node_idx = first_KDTreeNode (&parent, ray, nodes, &tmp_box, inside_box);
    }

    while (node_idx != UINT_MAX)
    {
        __global const KDTreeLeaf* restrict leaf;
        leaf = &nodes[node_idx].as.leaf;
        test_intersections (&hit_idx, &hit_mag, ray, leaf->nelems,
                            &elemidcs[leaf->elemidcs],
                            simplices, tris, front);
            /* The current best magnitude is passed down so traversal can
             * stop visiting nodes beyond the closest hit found so far.
             */
        node_idx = next_KDTreeNode (&parent, ray, &invdirect, hit_mag,
                                    node_idx, nodes);
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
}

    /** Convenience wrapper: build a Ray from origin/direction and cast it
     * against a single object's geometry and KD-tree.
     */
void
cast1_ObjectRaySpace (uint* ret_hit, real* ret_mag,
                      const Point* origin,
                      const Point* direct,
                      const ObjectRaySpace* object,
                      bool inside_box,
                      Trit front)
{
    Ray ray;
    ray.origin = *origin;
    ray.direct = *direct;
    cast_Ray (ret_hit, ret_mag, &ray,
              object->nelems,
              object->tree.elemidcs, object->tree.nodes,
              object->simplices, object->elems,
              &object->box, inside_box, front);
}

    /** Cast a ray against every visible object in the scene, scanning the
     * objects linearly (no object-level KD-tree partition).
     *
     * *ret_hit / *ret_mag / *ret_object are read as the current best hit
     * and updated in place.  \a ignore_object is skipped (pass UINT_MAX
     * for none).
     */
void
cast_nopartition (uint* ret_hit, real* ret_mag, uint* ret_object,
                  const RaySpace* restrict space,
                  const Point* restrict origin,
                  const Point* restrict dir,
                  bool inside_box,
                  Trit front,
                  uint ignore_object)
{
    uint i;
    uint hit_idx;
    real hit_mag;
    uint hit_object;

    hit_idx = *ret_hit;
    hit_mag = *ret_mag;
    hit_object = *ret_object;

    if (space->main.visible)
        cast1_ObjectRaySpace (&hit_idx, &hit_mag, origin, dir,
                              &space->main, inside_box, front);
        /* space->nobjects doubles as the object id for the main object. */
    if (hit_idx < space->main.nelems)
        hit_object = space->nobjects;

    UFor( i, space->nobjects )
    {
        Point rel_origin, rel_dir;
        const ObjectRaySpace* object;
        bool rel_inside_box;
        /* Returns from ray cast.*/
        uint tmp_hit;
        real tmp_mag;

        if (i == ignore_object || !space->objects[i].visible)
            continue;

            /* Transform the ray into the object's local frame. */
        object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir,
                                        origin, dir, space, i);
        rel_inside_box = inside_BBox (&object->box, &rel_origin);

        tmp_hit = UINT_MAX;
            /* NOTE(review): seeded from *ret_mag (the caller's cap) rather
             * than the running best `hit_mag'; the comparison below still
             * keeps only the closest hit, so this is merely a looser
             * per-object cap -- confirm intentional.
             */
        tmp_mag = *ret_mag;
        cast1_ObjectRaySpace (&tmp_hit, &tmp_mag, &rel_origin, &rel_dir,
                              object, rel_inside_box, front);
        if (tmp_mag < hit_mag)
        {
            hit_idx = tmp_hit;
            hit_mag = tmp_mag;
            hit_object = i;
        }
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
    *ret_object = hit_object;
}
#ifndef __OPENCL_VERSION__

    /** Test a ray against the objects listed in one object-tree leaf.
     * `tested' marks objects already cast so each is tested at most once
     * (set1_BitTable presumably returns the previous bit -- verify).
     */
static void
test_object_intersections (uint* ret_hit, real* ret_mag, uint* ret_object,
                           BitTable tested,
                           const Ray* ray,
                           uint nobjectidcs,
                           const uint* objectidcs,
                           const RaySpace* space,
                           Trit front)
{
    uint i;
    UFor( i, nobjectidcs )
{ uint objidx; Point rel_origin, rel_dir; const ObjectRaySpace* object; bool rel_inside_box; /* Returns from ray cast.*/ uint tmp_hit; real tmp_mag; objidx = objectidcs[i]; if (set1_BitTable (tested, objidx)) continue; object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir, &ray->origin, &ray->direct, space, objidx); rel_inside_box = inside_BBox (&object->box, &rel_origin); tmp_hit = UINT_MAX; tmp_mag = *ret_mag; cast1_ObjectRaySpace (&tmp_hit, &tmp_mag, &rel_origin, &rel_dir, object, rel_inside_box, front); if (tmp_mag < *ret_mag) { *ret_hit = tmp_hit; *ret_mag = tmp_mag; *ret_object = objidx; } } } static void cast_partitioned (uint* ret_hit, real* ret_mag, uint* ret_object, const RaySpace* restrict space, const Point* restrict origin, const Point* restrict dir, bool inside_box, Trit front, uint ignore_object) { FixDeclBitTable( tested, 128, 0 ); uint node_idx, parent = 0; const KDTreeNode* restrict nodes; const uint* restrict elemidcs; Point invdirect; uint hit_idx; real hit_mag; uint hit_object; Ray ray; copy_Point (&ray.origin, origin); copy_Point (&ray.direct, dir); assert (space->nobjects < tested.sz); if (ignore_object <= space->nobjects) set1_BitTable (tested, ignore_object); nodes = space->object_tree.nodes; elemidcs = space->object_tree.elemidcs; hit_idx = *ret_hit; hit_mag = *ret_mag; hit_object = *ret_object; reci_Point (&invdirect, &ray.direct); node_idx = first_KDTreeNode (&parent, &ray, nodes, &space->box, inside_box); while (node_idx != UINT_MAX) { __global const KDTreeLeaf* restrict leaf; leaf = &nodes[node_idx].as.leaf; test_object_intersections (&hit_idx, &hit_mag, &hit_object, tested, &ray, leaf->nelems, &elemidcs[leaf->elemidcs], space, front); node_idx = next_KDTreeNode (&parent, &ray, &invdirect, hit_mag, node_idx, nodes); } *ret_hit = hit_idx; *ret_mag = hit_mag; *ret_object = hit_object; } void cast1_RaySpace (uint* ret_hit, real* ret_mag, uint* ret_objidx, const Ray* ray, const RaySpace* space, Trit front) { bool inside_box = inside_BBox 
(&space->box, &ray->origin); if (space->partition) cast_partitioned (ret_hit, ret_mag, ret_objidx, space, &ray->origin, &ray->direct, inside_box, front, UINT_MAX); else cast_nopartition (ret_hit, ret_mag, ret_objidx, space, &ray->origin, &ray->direct, inside_box, front, UINT_MAX); } void cast_colors (Color* ret_color, const RaySpace* restrict space, const RayImage* restrict image, const Point* restrict origin, const Point* restrict dir, const Color* factor, Trit front, uint nbounces, URandom* urandom) { Color color; uint hit_idx = UINT_MAX; real hit_mag = Max_real; uint hit_object = UINT_MAX; bool inside_box; if (nbounces >= image->nbounces_max) return; if (space->partition) { inside_box = inside_BBox (&space->box, origin); cast_partitioned (&hit_idx, &hit_mag, &hit_object, space, origin, dir, inside_box, front, UINT_MAX); } else { inside_box = inside_BBox (&space->main.box, origin); cast_nopartition (&hit_idx, &hit_mag, &hit_object, space, origin, dir, inside_box, front, UINT_MAX); } zero_Color (&color); fill_pixel (&color, hit_idx, hit_mag, hit_object, image, origin, dir, space, front, nbounces+1, urandom); prod_Color (&color, &color, factor); summ_Color (ret_color, ret_color, &color); } bool cast_to_light (const RaySpace* restrict space, const Ray* restrict ray, Trit front, real magtolight) { uint hit_idx = UINT_MAX; real hit_mag = magtolight; uint hit_object = UINT_MAX; cast1_RaySpace (&hit_idx, &hit_mag, &hit_object, ray, space, front); return approx_eql (magtolight, hit_mag, 1, 1e2); } static void cast_record (uint* hitline, real* magline, byte* pixline, uint col, const RaySpace* restrict space, const RayImage* restrict image, const Point* restrict origin, const Point* restrict dir, bool inside_box, URandom* urandom) { uint hit_idx = UINT_MAX; real hit_mag = Max_real; uint hit_object = UINT_MAX; const Trit front = image->culling_on ? 
Yes : May; if (space->partition) cast_partitioned (&hit_idx, &hit_mag, &hit_object, space, origin, dir, inside_box, front, UINT_MAX); else cast_nopartition (&hit_idx, &hit_mag, &hit_object, space, origin, dir, inside_box, front, UINT_MAX); if (hitline) hitline[col] = hit_idx; if (magline) magline[col] = hit_mag; if (pixline) { Color color; zero_Color (&color); fill_pixel (&color, hit_idx, hit_mag, hit_object, image, origin, dir, space, front, 0, urandom); {:for (i ; NColors) pixline[3*col+i] = (byte) clamp_real (255.5 * color.coords[i], 0, 255.5); } } } #endif /* #ifndef __OPENCL_VERSION__ */ #ifndef __OPENCL_VERSION__ void rays_to_hits_fish (RayImage* restrict image, const RaySpace* space, const Point* origin, const PointXfrm* view_basis, real view_angle) { #ifndef _WIN32 uint row; #else int row; #endif bool inside_box; const uint row_dim = UpDim; const uint col_dim = RightDim; const uint dir_dim = ForwardDim; uint nrows, ncols; real col_start, row_start; real col_delta, row_delta; nrows = image->nrows; ncols = image->ncols; row_start = - view_angle / 2; row_delta = view_angle / nrows; row_start += row_delta / 2; col_start = - view_angle / 2; col_delta = view_angle / ncols; col_start += col_delta / 2; inside_box = inside_BBox (&space->box, origin); #pragma omp parallel { URandom urandom[1]; #ifdef _OPENMP init2_URandom (urandom, omp_get_thread_num(), omp_get_num_threads()); #else init_URandom (urandom); #endif #pragma omp for schedule(dynamic) UFor( row, nrows ) { uint col; real row_angle; uint* hitline = 0; real* magline = 0; byte* pixline = 0; if (image->hits) hitline = &image->hits[row * image->stride]; if (image->mags) magline = &image->mags[row * image->stride]; if (image->pixels) pixline = &image->pixels[row * 3 * image->stride]; row_angle = row_start + row_delta * row; UFor( col, ncols ) { Point tdir, dir; real col_angle; col_angle = col_start + col_delta * col; zero_Point (&dir); dir.coords[row_dim] = sin (row_angle); dir.coords[col_dim] = sin (col_angle); 
dir.coords[dir_dim] = cos (row_angle) + cos (col_angle); trxfrm_Point (&tdir, view_basis, &dir); #if 0 dir.coords[row_dim] = (tdir.coords[row_dim] * (1 + cos (row_angle)) + tdir.coords[dir_dim] * sin (row_angle)); dir.coords[col_dim] = (tdir.coords[col_dim] * (1 + cos (col_angle)) + tdir.coords[dir_dim] * sin (col_angle)); dir.coords[dir_dim] = (tdir.coords[dir_dim] * (cos (row_angle) + cos (col_angle)) - tdir.coords[row_dim] * sin (row_angle) - tdir.coords[col_dim] * sin (col_angle)); #endif normalize_Point (&dir, &tdir); cast_record (hitline, magline, pixline, col, space, image, origin, &dir, inside_box, urandom); } } } } void rays_to_hits_fixed_plane (uint* hits, real* mags, uint nrows, uint ncols, uint stride, const RaySpace* space, real zpos) { #ifndef _WIN32 uint row; #else int row; #endif bool inside_box; const uint row_dim = UpDim; const uint col_dim = RightDim; const uint dir_dim = ForwardDim; Point origin; real col_start, row_start; real col_delta, row_delta; const BBox* box; const ObjectRaySpace* object; object = &space->main; box = &object->box; row_start = box->min.coords[row_dim]; row_delta = (box->max.coords[row_dim] - row_start) / nrows; row_start += row_delta / 2; col_start = box->min.coords[col_dim]; col_delta = (box->max.coords[col_dim] - col_start) / ncols; col_start += col_delta / 2; inside_box = (zpos > box->min.coords[dir_dim] && zpos < box->max.coords[dir_dim]); origin.coords[dir_dim] = zpos; origin.coords[row_dim] = 50; origin.coords[col_dim] = 50; inside_box = inside_BBox (box, &origin); #pragma omp parallel for schedule(dynamic) UFor( row, nrows ) { uint col; uint* hitline; real* magline; hitline = &hits[row * stride]; magline = &mags[row * stride]; UFor( col, ncols ) { Point dir; uint hit; real mag; /* if (! 
(row == 333 && col == 322)) continue; */ zero_Point (&dir); dir.coords[row_dim] = row_start + row * row_delta; dir.coords[col_dim] = col_start + col * col_delta; diff_Point (&dir, &dir, &origin); normalize_Point (&dir, &dir); hit = object->nelems; mag = Max_real; cast1_ObjectRaySpace (&hit, &mag, &origin, &dir, object, inside_box, Yes); hitline[col] = hit; magline[col] = mag; /* if (row == 333 && col == 322) puts (elem ? "hit" : "miss"); */ } } } void cast_row_orthographic (RayImage* restrict image, uint row, const RaySpace* restrict space, const RayCastAPriori* restrict known, URandom* urandom) { uint col, ncols; const BBox* box; const Point* dir; uint* hitline = 0; real* magline = 0; byte* pixline = 0; ncols = image->ncols; box = &space->box; if (image->hits) hitline = &image->hits[row * image->stride]; if (image->mags) magline = &image->mags[row * image->stride]; if (image->pixels) pixline = &image->pixels[row * 3 * image->stride]; dir = &known->basis.pts[FwDim]; UFor( col, ncols ) { Point ray_origin; bool inside_box; ray_origin = known->origin; follow_Point (&ray_origin, &ray_origin, &known->basis.pts[UpDim], known->up_scale * (-1 + (2*row+1.0) / image->nrows)); follow_Point (&ray_origin, &ray_origin, &known->basis.pts[RtDim], known->rt_scale * (-1 + (2*col+1.0) / image->ncols)); inside_box = inside_BBox (box, &ray_origin); cast_record (hitline, magline, pixline, col, space, image, &ray_origin, dir, inside_box, urandom); } } void cast_row_perspective (RayImage* image, uint row, const RaySpace* restrict space, const RayCastAPriori* restrict known, URandom* urandom) { uint col, ncols; const Point* origin; uint* hitline = 0; real* magline = 0; byte* pixline = 0; ncols = image->ncols; if (image->hits) hitline = &image->hits[row * image->stride]; if (image->mags) magline = &image->mags[row * image->stride]; if (image->pixels) pixline = &image->pixels[row * 3 * image->stride]; origin = &known->origin; UFor( col, ncols ) { Point dir; #if 0 if (row == 103 && col == 61) 
{ DBogTraceOn = true; DBog0( "TRACE ON" ); } #endif #if 0 if (! (image->nrows - 1 - row == 31 && col == 27) && ! (image->nrows - 1 - row == 31 && col == 28) && ! (image->nrows - 1 - row == 31 && col == 26)) { if (hitline) hitline[col] = UINT_MAX; if (magline) magline[col] = Max_real; if (pixline) { pixline[3*col+0] = 255; pixline[3*col+1] = 255; pixline[3*col+2] = 255; } continue; } fprintf (stderr, "\ncol:%u ", col); #endif zero_Point (&dir); dir.coords[UpDim] = known->up_scale * (-1 + (2*row+1.0) / image->nrows); dir.coords[RtDim] = known->rt_scale * (-1 + (2*col+1.0) / image->ncols); dir.coords[FwDim] = 1; trxfrm_Point (&dir, &known->basis, &dir); normalize_Point (&dir, &dir); cast_record (hitline, magline, pixline, col, space, image, origin, &dir, known->inside_box, urandom); //if (DBogTraceOn) // DBogTraceOn = false; #if 0 { static bool missed = false; if (!missed && hitline[col] == UINT_MAX) { printf ("row:%u col:%u\n", row, col); missed = true; } } #endif } } #ifdef TrivialMpiRayTrace void balancer_triv_sync_mpi_RayImage (RayImage* image, uint row_off, uint row_nul, uint nprocs) { uint proc, ncols; ncols = image->ncols; for (proc = 1; proc < nprocs; ++proc) { uint i; for (i = proc; i < row_nul; i += nprocs) { MPI_Status status; uint row; row = row_off + i; if (image->hits) MPI_Recv (&image->hits[row*ncols], ncols * sizeof(uint), MPI_BYTE, proc, 1, MPI_COMM_WORLD, &status); if (image->mags) MPI_Recv (&image->mags[row*ncols], ncols * sizeof(real), MPI_BYTE, proc, 1, MPI_COMM_WORLD, &status); if (image->pixels) MPI_Recv (&image->pixels[row*3*ncols], 3 * ncols, MPI_BYTE, proc, 1, MPI_COMM_WORLD, &status); } } } void computer_triv_sync_mpi_RayImage (const RayImage* image, uint row_off, uint row_nul, uint myrank, uint nprocs) { uint i, ncols; ncols = image->ncols; for (i = myrank; i < row_nul; i += nprocs) { uint row; row = row_off + i; if (image->hits) MPI_Send (&image->hits[row*ncols], ncols * sizeof(uint), MPI_BYTE, 0, 1, MPI_COMM_WORLD); if (image->mags) 
MPI_Send (&image->mags[row*ncols], ncols * sizeof(real), MPI_BYTE, 0, 1, MPI_COMM_WORLD); if (image->pixels) MPI_Send (&image->pixels[row*3*ncols], 3 * ncols, MPI_BYTE, 0, 1, MPI_COMM_WORLD); } } #endif void setup_RayCastAPriori (RayCastAPriori* dst, const RayImage* image, const Point* origin, const PointXfrm* view_basis, const BBox* box) { dst->origin = *origin; dst->basis = *view_basis; if (image->perspective) { dst->up_scale = tan(.5 * image->hifov); dst->inside_box = inside_BBox (box, origin); } else { dst->up_scale = .5 * image->hifov; dst->inside_box = false; } dst->rt_scale = dst->up_scale * ((real)image->ncols / image->nrows); } void ray_from_RayCastAPriori (Ray* ray, const RayCastAPriori* known, uint row, uint col, const RayImage* image) { ray->origin = known->origin; if (image->perspective) { zero_Point (&ray->direct); ray->direct.coords[UpDim] = known->up_scale * (-1 + (2*row+1.0) / image->nrows); ray->direct.coords[RtDim] = known->rt_scale * (-1 + (2*col+1.0) / image->ncols); ray->direct.coords[FwDim] = 1; trxfrm_Point (&ray->direct, &known->basis, &ray->direct); normalize_Point (&ray->direct, &ray->direct); } else { follow_Point (&ray->origin, &ray->origin, &known->basis.pts[UpDim], known->up_scale * (-1 + (2*row+1.0) / image->nrows)); follow_Point (&ray->origin, &ray->origin, &known->basis.pts[RtDim], known->rt_scale * (-1 + (2*col+1.0) / image->ncols)); ray->direct = known->basis.pts[FwDim]; } } void cast_partial_RayImage (RayImage* restrict image, uint row_off, uint row_nul, const RaySpace* restrict space, const RayCastAPriori* restrict known) { #ifndef _WIN32 uint i; #else int i; #endif uint inc, nprocs, myrank; #ifdef TrivialMpiRayTrace MPI_Comm_size (MPI_COMM_WORLD, (int*) &nprocs); MPI_Comm_rank (MPI_COMM_WORLD, (int*) &myrank); #else myrank = 0; nprocs = 1; #endif #ifdef PackOpsAvail inc = RayPacketDimSz * nprocs; #else inc = nprocs; #endif #pragma omp parallel { URandom urandom[1]; uint threadid = myrank; uint nthreads = nprocs; #ifdef _OPENMP 
// Assume all MPI processes have the same number of threads. nthreads *= omp_get_num_threads(); threadid = omp_get_thread_num() + myrank * (nthreads/nprocs); #endif init2_URandom (urandom, threadid, nthreads); #pragma omp for schedule(dynamic) for (i = myrank; i < row_nul; i += inc) { #ifdef PackOpsAvail uint j; if (RayPacketDimSz <= row_nul) { for (j = 0; j < image->ncols; j += RayPacketDimSz) cast_packet_RayImage (image, row_off + i, j, space, known); } else { uint n; n = row_nul - i; UFor( j, n ) if (image->perspective) cast_row_perspective (image, row_off + i + j, space, known, urandom); else cast_row_orthographic (image, row_off + i + j, space, known, urandom); } #else if (image->perspective) cast_row_perspective (image, row_off + i, space, known, urandom); else cast_row_orthographic (image, row_off + i, space, known, urandom); #endif } } #ifdef TrivialMpiRayTrace if (myrank == 0) balancer_triv_sync_mpi_RayImage (image, row_off, row_nul, nprocs); else computer_triv_sync_mpi_RayImage (image, row_off, row_nul, myrank, nprocs); #endif } void cast_RayImage (RayImage* restrict image, const RaySpace* restrict space, const Point* restrict origin, const PointXfrm* restrict view_basis) { RayCastAPriori known; setup_RayCastAPriori (&known, image, origin, view_basis, &space->box); cast_partial_RayImage (image, 0, image->nrows, space, &known); } #endif /* #ifndef __OPENCL_VERSION__ */
/* ---- begin file: simd_misc_messages.c ---- */
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd safelen(4) void test_no_clause() { int i; #pragma omp simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}} #pragma omp simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd firstprivate(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd private(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd safelen for (i = 0; i 
< 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, ) for (i = 0; i < 16; ++i) ; // xxpected-error@+1 {{expected expression}} #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma 
omp simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 4) 
for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 
{{to match this '('}} #pragma omp simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error 
{{expected 4 for loops after '#pragma omp simd', but found only 1}} #pragma omp simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as reduction}} #pragma omp parallel #pragma omp simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp parallel #pragma omp for for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int 
i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd 
linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} 
#pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be 
array or pointer, not 'int'}} #pragma omp simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-error@+1 {{expected expression}} #pragma omp simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate() for (i 
= 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction() for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction( : x) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(, for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(+ for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+: for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :) for (i = 0; i < 16; ++i) ; // 
expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int f; #pragma omp simd linear(f) for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(val(f)) for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(uval(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(ref(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp 
simd linear(foo(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; }
top_k_v2_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
  top_k v2 exists for backward compatibility: its comparators deliberately
  treat NaN as the maximum value (for largest=true) / minimum value (for
  largest=false) while comparing.  Without this v2 kernel, inference results
  of models trained with older PaddlePaddle versions would change.
*/
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/operators/transpose_op.h"

namespace paddle {
namespace operators {

// Splits `dim` around `axis` into three factors:
//   *pre  = product of dims before `axis`
//   *n    = dim[axis]
//   *post = product of dims after `axis`
// so that the tensor can be viewed as a (pre, n, post) 3-D shape.
inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n,
                    int* post) {
  *pre = 1;
  *post = 1;
  *n = dim[axis];
  for (int i = 0; i < axis; ++i) {
    (*pre) *= dim[i];
  }
  for (int i = axis + 1; i < dim.size(); ++i) {
    (*post) *= dim[i];
  }
}

// Computes the top-k values and their indices for each of the `input_height`
// rows of `input` (each row has `input_width` elements; the k-th dim is the
// last one).  Results are written to t_out / t_indices, both of logical shape
// (input_height, k).
//
// NaN handling (the reason this v2 kernel exists): for largest=true a NaN
// compares as greater than any non-NaN value; for largest=false it compares
// as smaller.  NOTE(review): std::isnan is used without an explicit <cmath>
// include here — it is presumably pulled in transitively; confirm.
//
// `sorted` controls whether the k results of each row are emitted in sorted
// order; when false (and the nth_element path is taken) their order within
// the top-k set is unspecified.
template <typename T, typename Type>
static void FullTopK(Type input_height, Type input_width, int input_dim,
                     const framework::Tensor* input, T* t_out, Type* t_indices,
                     const int& k, const bool& largest, const bool& sorted) {
  // Heuristic: when k is small relative to the row width (k*64 < width),
  // partial_sort of just the first k elements is cheaper than nth_element
  // over the whole row.
  bool partial_sort_flag = (k * 64) < input_width;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // Materialize row i as (value, original-column-index) pairs so the
    // original indices survive the sort.
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      // 1-D input: the single row is the flattened tensor.
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      // N-D input viewed as a 2-D matrix; read row i.
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    if (partial_sort_flag) {
      // partial_sort already leaves [begin, begin+k) sorted, so `sorted`
      // needs no extra work on this path.
      std::partial_sort(
          col_vec.begin(), col_vec.begin() + k, col_vec.end(),
          [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
            if (largest) {
              // NaN ranks above any non-NaN value.
              return (std::isnan(static_cast<double>(l.first)) &&
                      !std::isnan(static_cast<double>(r.first))) ||
                     (l.first > r.first);
            } else {
              // NaN ranks below any non-NaN value.
              return (!std::isnan(static_cast<double>(l.first)) &&
                      std::isnan(static_cast<double>(r.first))) ||
                     (l.first < r.first);
            }
          });
    } else {
      // Use nth_element to partition the K-largest (or K-smallest) elements
      // into the front of the vector in O(n).
      if (largest) {
        std::nth_element(
            col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
            [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
              return (std::isnan(static_cast<double>(l.first)) &&
                      !std::isnan(static_cast<double>(r.first))) ||
                     (l.first > r.first);
            });
        // nth_element leaves the top-k unordered; sort them if requested.
        // Sorting only [begin, begin+k-1) is sufficient: nth_element already
        // placed the element at position k-1 in its final sorted position.
        if (sorted) {
          std::sort(col_vec.begin(), col_vec.begin() + k - 1,
                    [&largest](const std::pair<T, Type>& l,
                               const std::pair<T, Type>& r) {
                      return (std::isnan(static_cast<double>(l.first)) &&
                              !std::isnan(static_cast<double>(r.first))) ||
                             (l.first > r.first);
                    });
        }
      } else {
        std::nth_element(
            col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
            [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
              return (!std::isnan(static_cast<double>(l.first)) &&
                      std::isnan(static_cast<double>(r.first))) ||
                     (l.first < r.first);
            });
        // nth_element leaves the bottom-k unordered; sort them if requested.
        if (sorted) {
          std::sort(
              col_vec.begin(), col_vec.begin() + k - 1,
              [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                return (!std::isnan(static_cast<double>(l.first)) &&
                        std::isnan(static_cast<double>(r.first))) ||
                       (l.first < r.first);
              });
        }
      }
    }
    // Emit the first k (value, index) pairs of row i.
    for (Type j = 0; j < k; ++j) {
      t_out[i * k + j] = col_vec[j].first;
      t_indices[i * k + j] = col_vec[j].second;
    }
  }
}

// Scatter helper used by the gradient kernel: for each of the `input_height`
// rows, writes the k gradient values from `input` back into `output_data` at
// the column positions stored in `indices`:
//   output_data[i * input_width + indices(i, j)] = input(i, j)
// The caller is responsible for zero-initializing output_data (positions not
// selected by top-k receive no gradient).
template <typename T, typename Type>
static void FullTopKAssign(const Type& input_height, const Type& input_width,
                           const int& input_dim, const framework::Tensor* input,
                           const framework::Tensor* indices, T* output_data,
                           const int& k) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      // 1-D case: a single logical row.
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      for (Type j = 0; j < k; ++j) {
        output_data[i * input_width + e_indices(j)] = e_input(j);
      }
    } else {
      // N-D case viewed as a 2-D matrix of rows.
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      for (Type j = 0; j < k; ++j) {
        output_data[i * input_width + e_indices(i, j)] = e_input(i, j);
      }
    }
  }
}

// Forward CPU kernel for top_k_v2: writes the top-k values of X along `axis`
// to Out and their positions to Indices.
template <typename DeviceContext, typename T>
class TopkV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // Get the top k elements of each row of the input tensor.
    auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");
    auto* indices = context.Output<Tensor>("Indices");
    const auto& in_dims = input->dims();
    int k = static_cast<int>(context.Attr<int>("k"));
    const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted"));
    const auto& largest = static_cast<bool>(context.Attr<bool>("largest"));

    // A negative axis counts from the end; normalize to the real axis.
    int axis = static_cast<int>(context.Attr<int>("axis"));
    if (axis < 0) axis += in_dims.size();

    // If the optional K input tensor is present, it overrides the `k`
    // attribute, and the output/indices shapes must be resized to match.
    auto* k_t = context.Input<Tensor>("K");
    if (k_t) {
      k = k_t->data<int>()[0];
      framework::DDim output_dims = output->dims();
      // Set the K value at `axis` in the output dims.
      output_dims[axis] = k;
      output->Resize(output_dims);
      indices->Resize(output_dims);
    }

    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());

    const auto& out_dims = output->dims();

    if (axis + 1 == in_dims.size()) {
      // Fast path: top-k along the last dim — no transpose needed.
      const int64_t& input_height =
          pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input,
                           output_data, indices_data, k, largest, sorted);
    } else {
      // top-k along a non-last dim: transpose `axis` to the last position,
      // run the last-dim kernel, then transpose the results back.
      // Build the permutation [0..axis) + [last] + [axis+1..last) + [axis].
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);

      // Compute the transposed input and output dims.
      framework::DDim trans_dims(in_dims);
      framework::DDim trans_out_dims(output->dims());
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }
      for (size_t i = 0; i < trans.size(); i++) {
        trans_out_dims[i] = out_dims[trans[i]];
      }

      Tensor trans_inp;
      trans_inp.mutable_data<T>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      // Transpose the input values.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_inp, trans);

      const int64_t input_height =
          pten::product(pten::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];

      // Temporary tensors holding the top-k values and indices in the
      // transposed layout.
      Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace());
      Tensor tmp_indices;
      auto* t_ind =
          tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace());

      // Get the top-k values along the (now last) dim.
      FullTopK<T, int64_t>(input_height, input_width, in_dims.size(),
                           &trans_inp, t_out, t_ind, k, largest, sorted);
      // Transpose the results back to the original layout.  (The same `trans`
      // permutation is its own inverse here because it only swaps `axis` with
      // the last dim — TODO(review): confirm for the general case.)
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans);
    }
  }
};

// Gradient CPU kernel for top_k_v2: scatters Out@GRAD back into X@GRAD at
// the positions recorded in Indices; all other positions get zero gradient.
template <typename DeviceContext, typename T>
class TopkV2GradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<Tensor>("Indices");
    auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));

    const auto& in_dims = x->dims();
    const auto& out_dims = indices->dims();

    // A negative axis counts from the end; normalize to the real axis.
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    // k is recovered from the indices shape rather than the attribute, since
    // the forward pass may have taken k from the K input tensor.
    const size_t& k = out_dims[axis];

    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis + 1 == in_dims.size()) {
      // Fast path: gradient along the last dim — scatter directly.
      const int64_t input_height =
          pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];

      // Zero-init the input grad: elements not in the top-k have no grad.
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      // Scatter the output grad into the input grad.
      FullTopKAssign(input_height, input_width, in_dims.size(), out_grad,
                     indices, x_grad_data, k);
    } else {
      // Non-last axis: transpose out_grad and indices so the top-k dim is
      // last, scatter, then transpose the result back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);
      framework::DDim trans_dims(out_dims);
      framework::DDim trans_in_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = out_dims[trans[i]];
        trans_in_dims[i] = in_dims[trans[i]];
      }
      // Transposed copies of out_grad and indices.
      Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_dims, context.GetPlace());
      Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      // Do the transposes.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context,
                                                  *out_grad, &trans_dO, trans);
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, *indices, &trans_ind, trans);
      const int64_t input_height = pten::product(
          pten::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
      const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];

      // Scatter out_grad into a zero-initialized transposed input grad.
      Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace());
      memset(t_out, 0, x_grad->numel() * sizeof(T));

      FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                                 &trans_dO, &trans_ind, t_out, k);

      // Transpose back into x_grad's original layout.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans);
    }
  }
};

}  // namespace operators
}  // namespace paddle
convolution_3x3_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, 
-2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16)); float32x4_t _r01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 4), 16)); float32x4_t _r02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 8), 16)); float32x4_t _r03 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 12), 16)); float32x4_t _r04 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 16), 16)); float32x4_t _r05 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 20), 16)); float32x4_t _r06 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 24), 16)); float32x4_t _r07 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0 + 28), 16)); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = 
r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = 
vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] 
= tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r 
* tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n" "sub %0, %0, #128 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v19.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #64 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d16-d19}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d20-d23}, [%0 :128] \n" "sub %0, %0, #96 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" "vst1.f32 {d16-d17}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" "vst1.f32 {d22-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128] \n" "sub %0, %0, #32 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 
{v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla 
v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" "st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n" "st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n" "st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n" "st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 
"=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" 
"fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 
{v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" 
"fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" "st1 {v18.4s}, [%3], #16 \n" "st1 {v19.4s}, [%4], #16 \n" "st1 {v20.4s}, [%5], #16 \n" "st1 {v21.4s}, [%6], #16 \n" "st1 {v22.4s}, [%7], #16 \n" "st1 {v23.4s}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4s}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, 
v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.s}[0], [%1], #4 \n" "st1 {v16.s}[1], [%2], #4 \n" "st1 {v16.s}[2], [%3], #4 \n" "st1 {v16.s}[3], [%4], #4 \n" "st1 {v17.s}[0], [%5], #4 \n" "st1 {v17.s}[1], [%6], #4 \n" "st1 {v17.s}[2], [%7], #4 \n" "st1 {v17.s}[3], [%8], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); #if __aarch64__ const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel01_tm = kernel_tm.channel(p / 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, 
v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla 
v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, 
v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 
q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1]! \n" "vst1.f32 {d20-d23}, [%2]! \n" "vst1.f32 {d24-d27}, [%3]! \n" "vst1.f32 {d28-d31}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] 
\n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.s}[0], [%1], #4 \n" "st1 {v8.s}[1], [%2], #4 \n" "st1 {v8.s}[2], [%3], #4 \n" "st1 {v8.s}[3], [%4], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! 
\n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16[0]}, [%1]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vst1.f32 {d17[0]}, [%3]! \n" "vst1.f32 {d17[1]}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm 
pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 
\n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #512] \n" "vldm %2!, {d24-d31} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, 
#128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16-d17}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel0_tm.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q = 0; q < inch; q++) { float32x4_t _r0 = vld1q_f32(r0); float32x4_t _k0 = vld1q_f32(kptr); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; r0 += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, _ss); float sum0 = vget_lane_f32(_ss2, 0); #endif output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 
8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; // float32x2_t _bias0 = vdup_n_f32(bias0); float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 1; const float* output0_tm_1 = output0_tm_0 + tiles * 1; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO neon optimize for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 
8; } unsigned short* output0 = out0.row<unsigned short>(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = float32_to_bfloat16(bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32); output0[2] = float32_to_bfloat16(bias0 + tmp024a + tmp024b * 4 + tmp024c * 8); output0[4] = float32_to_bfloat16(bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c); output0[1] = float32_to_bfloat16(bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16); output0[3] = float32_to_bfloat16(bias0 + tmp135a + tmp135b * 8 + tmp135c * 4); output0[5] = float32_to_bfloat16(bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c); output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
Fig_7.15_parPiDivCon.c
/*
 * Fig. 7.15: estimate pi by midpoint-rule integration of 4/(1+x^2) over
 * [0,1], parallelized with a recursive divide-and-conquer OpenMP task tree.
 */
#include <stdio.h>
#include <omp.h>

static long num_steps = 1024 * 1024 * 1024;

/* Smallest sub-range integrated serially instead of being split into two
 * child tasks.  Parenthesized so the macro is safe in any expression. */
#define MIN_BLK (1024 * 256)

/*
 * Return the midpoint-rule partial sum over slice indices [Nstart, Nfinish).
 * step is the width of one slice; the caller multiplies the returned sum by
 * step to obtain the integral.  Assumes Nfinish fits in an int (1024^3 does).
 */
double pi_comp(int Nstart, int Nfinish, double step)
{
    double sum = 0.0, sum1, sum2;

    if (Nfinish - Nstart < MIN_BLK) {
        /* Base case: serial accumulation over the slice range. */
        for (int i = Nstart; i < Nfinish; i++) {
            double x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    } else {
        /* Recursive case: split the range in half, one task per half.
         * sum1/sum2 must be shared so the child tasks' results are
         * visible here after the taskwait. */
        int iblk = Nfinish - Nstart;
#pragma omp task shared(sum1)
        sum1 = pi_comp(Nstart, Nfinish - iblk / 2, step);
#pragma omp task shared(sum2)
        sum2 = pi_comp(Nfinish - iblk / 2, Nfinish, step);
#pragma omp taskwait
        sum = sum1 + sum2;
    }
    return sum;
}

int main()
{
    double step, pi, sum;

    step = 1.0 / (double)num_steps;
#pragma omp parallel
    {
        /* One thread seeds the task tree; the whole team executes tasks. */
#pragma omp single
        sum = pi_comp(0, num_steps, step);
    }
    pi = step * sum;
    /* FIX: pi was computed but never used; also removed the unused local
     * `i` and added the missing return from main. */
    printf("pi = %.15f\n", pi);
    return 0;
}
math_array.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_CONTAINER_MATH_ARRAY_H_ #define CORE_CONTAINER_MATH_ARRAY_H_ #include <algorithm> #include <cassert> #include <cmath> #include <numeric> #include <ostream> #include <stdexcept> #include <utility> #include "core/util/root.h" #include "core/util/log.h" namespace bdm { /// Array with a fixed number of elements. It implements the same behaviour /// of the standard `std::array<T, N>` container. However, it provides also /// several custom mathematical operations (e.g. Sum(), Norm() etc.). template <class T, std::size_t N> class MathArray { // NOLINT public: /// Default constructor MathArray() { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] = T(); } } /// Constructor which accepts an std::initiliazer_list to set /// the array's content. /// \param l an initializer list constexpr MathArray(std::initializer_list<T> l) { assert(l.size() <= N); auto it = l.begin(); for (uint64_t i = 0; i < N; i++) { data_[i] = *(it++); } for (uint64_t i = l.size(); i < N; i++) { data_[i] = T(); } } /// Return a pointer to the underlying data. /// \return cont T pointer to the first entry of the array. inline const T* data() const { return &data_[0]; } // NOLINT /// Return the size of the array. /// \return integer denoting the array's size. inline const size_t size() const { return N; } // NOLINT /// Check if the array is empty. 
/// \return true if size() == 0, false otherwise. inline const bool empty() const { return N == 0; } // NOLINT /// Overloaded array subscript operator. It does not perform /// any boundary checks. /// \param idx element's index. /// \return the requested element. T& operator[](size_t idx) { return data_[idx]; } /// Const overloaded array subscript operator. /// \param idx element's index. /// \return the requested element. const T& operator[](size_t idx) const { return data_[idx]; } /// Returns the element at the given position. It will throw /// an std::out_of_range exception if the given index is out /// of the array's boundaries. /// \param idx the index of the element. /// \return the requested element. T& at(size_t idx) noexcept(false) { // NOLINT if (idx > size() || idx < 0) { throw std::out_of_range("The index is out of range"); } return data_[idx]; } const T* begin() const { return &(data_[0]); } // NOLINT const T* end() const { return &(data_[N]); } // NOLINT T* begin() { return &(data_[0]); } // NOLINT T* end() { return &(data_[N]); } // NOLINT /// Returns the element at the beginning of the array. /// \return first element. T& front() { return *(this->begin()); } // NOLINT /// Return the element at the end of the array. /// \return last element. T& back() { // NOLINT auto tmp = this->end(); tmp--; return *tmp; } /// Assignment operator. /// \param other the other MathArray instance. /// \return the current MathArray. MathArray& operator=(const MathArray& other) { if (this != &other) { assert(other.size() == N); std::copy(other.data_, other.data_ + other.size(), data_); } return *this; } /// Equality operator. /// \param other a MathArray instance. /// \return true if they have the same content, false otherwise. 
bool operator==(const MathArray& other) const { if (other.size() != N) { return false; } for (size_t i = 0; i < N; i++) { if (other[i] != data_[i]) { return false; } } return true; } MathArray& operator++() { #pragma omp simd for (size_t i = 0; i < N; i++) { ++data_[i]; } return *this; } MathArray operator++(int) { MathArray tmp(*this); operator++(); return tmp; } MathArray& operator--() { #pragma omp simd for (size_t i = 0; i < N; i++) { --data_[i]; } return *this; } MathArray operator--(int) { MathArray tmp(*this); operator--(); return tmp; } MathArray& operator+=(const MathArray& rhs) { assert(N == rhs.size()); #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] += rhs[i]; } return *this; } MathArray operator+(const MathArray& rhs) { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs[i]; } return tmp; } const MathArray operator+(const MathArray& rhs) const { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs[i]; } return tmp; } MathArray& operator+=(const T& rhs) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] += rhs; } return *this; } MathArray operator+(const T& rhs) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] + rhs; } return tmp; } MathArray& operator-=(const MathArray& rhs) { assert(size() == rhs.size()); #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] -= rhs[i]; } return *this; } MathArray operator-(const MathArray& rhs) { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs[i]; } return tmp; } const MathArray operator-(const MathArray& rhs) const { assert(size() == rhs.size()); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs[i]; } return tmp; } MathArray& operator-=(const T& rhs) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] -= rhs; 
} return *this; } MathArray operator-(const T& rhs) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] - rhs; } return tmp; } T& operator*=(const MathArray& rhs) = delete; T operator*(const MathArray& rhs) { assert(size() == rhs.size()); T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * rhs[i]; } return result; } const T operator*(const MathArray& rhs) const { assert(size() == rhs.size()); T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * rhs[i]; } return result; } MathArray& operator*=(const T& k) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] *= k; } return *this; } MathArray operator*(const T& k) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] * k; } return tmp; } const MathArray operator*(const T& k) const { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] * k; } return tmp; } MathArray& operator/=(const T& k) { #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] /= k; } return *this; } MathArray operator/(const T& k) { MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; i++) { tmp[i] = data_[i] / k; } return tmp; } /// Fill the MathArray with a constant value. /// \param k the constant value /// \return the array MathArray& fill(const T& k) { // NOLINT std::fill(std::begin(data_), std::end(data_), k); return *this; } /// Return the sum of all the array's elements. /// \return sum of the array's content. T Sum() const { return std::accumulate(begin(), end(), 0); } /// Compute the norm of the array's content. /// \return array's norm. T Norm() const { T result = 0; #pragma omp simd for (size_t i = 0; i < N; i++) { result += data_[i] * data_[i]; } result = std::sqrt(result); return result; } /// Normalize the array. It will be done in-place. /// \return the normalized array. 
MathArray& Normalize() { T norm = Norm(); if (norm == 0){ Log::Fatal("MathArray::Normalize","You tried to normalize a zero vector. " "This cannot be done. Exiting."); } #pragma omp simd for (size_t i = 0; i < N; i++) { data_[i] /= norm; } return *this; } /// Compute the entry wise product given another array /// of the same size. /// \param rhs the other array /// \return a new array with the result MathArray EntryWiseProduct(const MathArray& rhs) { assert(rhs.size() == N); MathArray tmp; #pragma omp simd for (size_t i = 0; i < N; ++i) { tmp[i] = data_[i] * rhs[i]; } return tmp; } private: T data_[N]; BDM_CLASS_DEF_NV(MathArray, 1); // NOLINT }; template <class T, std::size_t N> std::ostream& operator<<(std::ostream& o, const MathArray<T, N>& arr) { for (size_t i = 0; i < N; i++) { o << arr[i]; if (i != N - 1) { o << ", "; } } return o; } /// Alias for a size 3 MathArray using Double3 = MathArray<double, 3>; /// Alias for a size 4 MathArray using Double4 = MathArray<double, 4>; } // namespace bdm #endif // CORE_CONTAINER_MATH_ARRAY_H_
yolov2.h
#ifndef YOLOV3 #define YOLOV3 #include <stdio.h> #include <stdlib.h> //#include <iostream> #include <math.h> #include <fcntl.h> #include <string.h> #include <time.h> #include "xconv_hw.h" //#include "hw_drivers.h" #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define FLT_MAX 3.402823466e+38F /* max value */ double what_time_is_it_now() { struct timeval time; if (gettimeofday(&time,NULL)){ return 0; } return (double)time.tv_sec + (double)time.tv_usec * .000001; } //#include "yolo_hls.h" typedef enum{ LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN } ACTIVATION; typedef enum { CONVOLUTIONAL, DECONVOLUTIONAL, CONNECTED, MAXPOOL, SOFTMAX, DETECTION, DROPOUT, CROP, ROUTE, COST, NORMALIZATION, AVGPOOL, LOCAL, SHORTCUT, ACTIVE, RNN, GRU, LSTM, CRNN, BATCHNORM, NETWORK, XNOR, REGION, YOLO, REORG, UPSAMPLE, LOGXENT, L2NORM, BLANK } LAYER_TYPE; struct network; typedef struct network network; struct layer; typedef struct layer layer; struct layer{ LAYER_TYPE type; ACTIVATION activation; void (*forward) (struct layer, struct network); int batch_normalize; int shortcut; int batch; int forced; int flipped; int inputs; int outputs; int nweights; int nbiases; int extra; int truths; int h,w,c; int out_h, out_w, out_c; int n; int max_boxes; int groups; int size; int side; int stride; int reverse; int flatten; int spatial; int pad; int sqrt; int flip; int index; int binary; int xnor; int steps; int hidden; int truth; float smooth; float dot; float angle; float jitter; float saturation; float exposure; float shift; float ratio; float learning_rate_scale; float clip; int softmax; int classes; int coords; int background; int rescore; int objectness; int joint; int noadjust; int reorg; int log; int tanh; int *mask; int total; float alpha; float beta; float kappa; float coord_scale; float object_scale; float noobject_scale; float mask_scale; float class_scale; int 
bias_match; int random; float ignore_thresh; float truth_thresh; float thresh; float focus; int classfix; int absolute; int onlyforward; int stopbackward; // int dontload; int dontsave; // int dontloadscales; float temperature; float probability; float scale; char * cweights; int * indexes; int * input_layers; int * input_sizes; int * map; float * rand; float * cost; float * state; float * prev_state; float * forgot_state; float * forgot_delta; float * state_delta; float * combine_cpu; float * combine_delta_cpu; float * concat; float * concat_delta; float * binary_weights; float * biases; float * bias_updates; float * scales; float * scale_updates; float * weights; float * weight_updates; float * delta; float * output; float * loss; float * squared; float * norms; float * spatial_mean; float * mean; float * variance; float * mean_delta; float * variance_delta; float * rolling_mean; float * rolling_variance; float * x; float * x_norm; float * m; float * v; float * bias_m; float * bias_v; float * scale_m; float * scale_v; float *z_cpu; float *r_cpu; float *h_cpu; float * prev_state_cpu; float *temp_cpu; float *temp2_cpu; float *temp3_cpu; float *dh_cpu; float *hh_cpu; float *prev_cell_cpu; float *cell_cpu; float *f_cpu; float *i_cpu; float *g_cpu; float *o_cpu; float *c_cpu; float *dc_cpu; float * binary_input; struct layer *input_layer; struct layer *self_layer; struct layer *output_layer; struct layer *reset_layer; struct layer *update_layer; struct layer *state_layer; struct layer *input_gate_layer; struct layer *state_gate_layer; struct layer *input_save_layer; struct layer *state_save_layer; struct layer *input_state_layer; struct layer *state_state_layer; struct layer *input_z_layer; struct layer *state_z_layer; struct layer *input_r_layer; struct layer *state_r_layer; struct layer *input_h_layer; struct layer *state_h_layer; struct layer *wz; struct layer *uz; struct layer *wr; struct layer *ur; struct layer *wh; struct layer *uh; struct layer *uo; struct 
layer *wo; struct layer *uf; struct layer *wf; struct layer *ui; struct layer *wi; struct layer *ug; struct layer *wg; //tree *softmax_tree; size_t workspace_size; }; void free_layer(layer l) { if(l.cweights) free(l.cweights); if(l.indexes) free(l.indexes); if(l.input_layers) free(l.input_layers); if(l.input_sizes) free(l.input_sizes); if(l.map) free(l.map); if(l.rand) free(l.rand); if(l.cost) free(l.cost); if(l.state) free(l.state); if(l.prev_state) free(l.prev_state); if(l.forgot_state) free(l.forgot_state); if(l.forgot_delta) free(l.forgot_delta); if(l.state_delta) free(l.state_delta); if(l.concat) free(l.concat); if(l.concat_delta) free(l.concat_delta); if(l.binary_weights) free(l.binary_weights); if(l.biases) free(l.biases); if(l.bias_updates) free(l.bias_updates); if(l.scales) free(l.scales); if(l.scale_updates) free(l.scale_updates); if(l.weights) free(l.weights); if(l.weight_updates) free(l.weight_updates); if(l.delta) free(l.delta); if(l.output) free(l.output); if(l.squared) free(l.squared); if(l.norms) free(l.norms); if(l.spatial_mean) free(l.spatial_mean); if(l.mean) free(l.mean); if(l.variance) free(l.variance); if(l.mean_delta) free(l.mean_delta); if(l.variance_delta) free(l.variance_delta); if(l.rolling_mean) free(l.rolling_mean); if(l.rolling_variance) free(l.rolling_variance); if(l.x) free(l.x); if(l.x_norm) free(l.x_norm); if(l.m) free(l.m); if(l.v) free(l.v); if(l.z_cpu) free(l.z_cpu); if(l.r_cpu) free(l.r_cpu); if(l.h_cpu) free(l.h_cpu); if(l.binary_input) free(l.binary_input); } //void free_layer(layer); typedef enum { CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM } learning_rate_policy; typedef struct network{ int n; int batch; size_t *seen; int *t; float epoch; int subdivisions; layer *layers; float *output; learning_rate_policy policy; float learning_rate; float momentum; float decay; float gamma; float scale; float power; int time_steps; int step; int max_batches; float *scales; int *steps; int num_steps; int burn_in; int adam; float B1; 
float B2; float eps; int inputs; int outputs; int truths; int notruth; int h, w, c; int max_crop; int min_crop; float max_ratio; float min_ratio; int center; float angle; float aspect; float exposure; float saturation; float hue; int random; int gpu_index; // tree *hierarchy; float *input; float *truth; float *delta; float *workspace; int train; int index; float *cost; float clip; } network; network *make_network(int n); layer get_network_output_layer(network *net); typedef struct { int w; int h; float scale; float rad; float dx; float dy; float aspect; } augment_args; typedef struct { int w; int h; int c; float *data; } image; typedef struct{ float x, y, w, h; } box; typedef struct detection{ box bbox; int classes; float *prob; float *mask; float objectness; int sort_class; } detection; typedef struct matrix{ int rows, cols; float **vals; } matrix; typedef struct{ int w, h; matrix X; matrix y; int shallow; int *num_boxes; box **boxes; } data; typedef enum { CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA } data_type; typedef struct load_args{ int threads; char **paths; char *path; int n; int m; char **labels; int h; int w; int out_w; int out_h; int nh; int nw; int num_boxes; int min, max, size; int classes; int background; int scale; int center; int coords; float jitter; float angle; float aspect; float saturation; float exposure; float hue; data *d; image *im; image *resized; data_type type; // tree *hierarchy; } load_args; typedef struct{ int id; float x,y,w,h; float left, right, top, bottom; } box_label; //network *load_network(char *cfg, char *weights, int clear); //load_args get_base_args(network *net); //void free_data(data d); typedef struct{ char *key; char *val; int used; } kvp; typedef struct node{ void *val; struct node *next; struct node *prev; } node; 
typedef struct list{ int size; node *front; node *back; } list; void error(const char *s) { perror(s); assert(0); exit(-1); } void malloc_error() { fprintf(stderr, "Malloc error\n"); exit(-1); } void file_error(char *s) { fprintf(stderr, "Couldn't open file: %s\n", s); exit(0); } /////////////////list begin list *make_list() { list *l = (list *)malloc(sizeof(list)); l->size = 0; l->front = 0; l->back = 0; return l; } void *list_pop(list *l){ if(!l->back) return 0; node *b = l->back; void *val = b->val; l->back = b->prev; if(l->back) l->back->next = 0; free(b); --l->size; return val; } void list_insert(list *l, void *val) { node *new_node = (node *)malloc(sizeof(node)); new_node->val = val; new_node->next = 0; if(!l->back){ l->front = new_node; new_node->prev = 0; }else{ l->back->next = new_node; new_node->prev = l->back; } l->back = new_node; ++l->size; } void free_node(node *n) { node *next; while(n) { next = n->next; free(n); n = next; } } void free_list(list *l) { free_node(l->front); free(l); } void free_list_contents(list *l) { node *n = l->front; while(n){ free(n->val); n = n->next; } } void **list_to_array(list *l) { void **a = (void **)calloc(l->size, sizeof(void*)); int count = 0; node *n = l->front; while(n){ a[count++] = n->val; n = n->next; } return a; } /////////////////list end /////////////////////utils begin void del_arg(int argc, char **argv, int index) { int i; for(i = index; i < argc-1; ++i) argv[i] = argv[i+1]; argv[i] = 0; } int find_arg(int argc, char* argv[], char *arg) { int i; for(i = 0; i < argc; ++i) { if(!argv[i]) continue; if(0==strcmp(argv[i], arg)) { del_arg(argc, argv, i); return 1; } } return 0; } int find_int_arg(int argc, char **argv, char *arg, int def) { int i; for(i = 0; i < argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = atoi(argv[i+1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } float find_float_arg(int argc, char **argv, char *arg, float def) { int i; for(i = 0; i < 
argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = atof(argv[i+1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } char *find_char_arg(int argc, char **argv, char *arg, char *def) { int i; for(i = 0; i < argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = argv[i+1]; del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } unsigned char *read_file(char *filename) { FILE *fp = fopen(filename, "rb"); size_t size; fseek(fp, 0, SEEK_END); size = ftell(fp); fseek(fp, 0, SEEK_SET); unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char)); fread(text, 1, size, fp); fclose(fp); return text; } list *split_str(char *s, char delim) { size_t i; size_t len = strlen(s); list *l = make_list(); list_insert(l, s); for(i = 0; i < len; ++i){ if(s[i] == delim){ s[i] = '\0'; list_insert(l, &(s[i+1])); } } return l; } void strip(char *s) { size_t i; size_t len = strlen(s); size_t offset = 0; for(i = 0; i < len; ++i){ char c = s[i]; if(c==' '||c=='\t'||c=='\n') ++offset; else s[i-offset] = c; } s[len-offset] = '\0'; } void strip_char(char *s, char bad) { size_t i; size_t len = strlen(s); size_t offset = 0; for(i = 0; i < len; ++i){ char c = s[i]; if(c==bad) ++offset; else s[i-offset] = c; } s[len-offset] = '\0'; } void free_ptrs(void **ptrs, int n) { int i; for(i = 0; i < n; ++i) free(ptrs[i]); free(ptrs); } char *fgetl(FILE *fp) { if(feof(fp)) return 0; size_t size = 512; char *line = (char *)malloc(size*sizeof(char)); if(!fgets(line, size, fp)){ free(line); return 0; } size_t curr = strlen(line); while((line[curr-1] != '\n') && !feof(fp)){ if(curr == size-1){ size *= 2; line = (char *)realloc(line, size*sizeof(char)); if(!line) { printf("%ld\n", size); malloc_error(); } } size_t readsize = size-curr; if(readsize > INT_MAX) readsize = INT_MAX-1; fgets(&line[curr], readsize, fp); curr = strlen(line); } if(line[curr-1] == '\n') line[curr-1] = '\0'; return line; } 
/////////////////////utils end ////////////////////option_list begin void option_insert(list *l, char *key, char *val) { kvp *p = (kvp *)malloc(sizeof(kvp)); p->key = key; p->val = val; p->used = 0; list_insert(l, p); } int read_option(char *s, list *options) { size_t i; size_t len = strlen(s); char *val = 0; for(i = 0; i < len; ++i){ if(s[i] == '='){ s[i] = '\0'; val = s+i+1; break; } } if(i == len-1) return 0; char *key = s; option_insert(options, key, val); return 1; } void option_unused(list *l) { node *n = l->front; while(n){ kvp *p = (kvp *)n->val; if(!p->used){ fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val); } n = n->next; } } char *option_find(list *l, char *key) { node *n = l->front; while(n){ kvp *p = (kvp *)n->val; if(strcmp(p->key, key) == 0){ p->used = 1; return p->val; } n = n->next; } return 0; } char *option_find_str(list *l, char *key, char *def) { char *v = option_find(l, key); if(v) return v; if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def); return def; } int option_find_int(list *l, char *key, int def) { char *v = option_find(l, key); if(v) return atoi(v); fprintf(stderr, "%s: Using default '%d'\n", key, def); return def; } int option_find_int_quiet(list *l, char *key, int def) { char *v = option_find(l, key); if(v) return atoi(v); return def; } float option_find_float_quiet(list *l, char *key, float def) { char *v = option_find(l, key); if(v) return atof(v); return def; } float option_find_float(list *l, char *key, float def) { char *v = option_find(l, key); if(v) return atof(v); fprintf(stderr, "%s: Using default '%lf'\n", key, def); return def; } list *read_data_cfg(char *filename) { FILE *file = fopen(filename, "r"); if(file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); while((line=fgetl(file)) != 0){ ++ nu; strip(line); switch(line[0]){ case '\0': case '#': case ';': free(line); break; default: if(!read_option(line, options)){ fprintf(stderr, "Config file error line %d, could 
parse: %s\n", nu, line); free(line); } break; } } fclose(file); return options; } ///////////////////option_list end image make_empty_image(int w, int h, int c) { image out; out.data = 0; out.h = h; out.w = w; out.c = c; return out; } list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } image make_image(int w, int h, int c) { image out = make_empty_image(w,h,c); out.data = (float *)calloc(h*w*c, sizeof(float)); return out; } static float get_pixel(image m, int x, int y, int c) { assert(x < m.w && y < m.h && c < m.c); return m.data[c*m.h*m.w + y*m.w + x]; } static void set_pixel(image m, int x, int y, int c, float val) { if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return; assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] = val; } static void add_pixel(image m, int x, int y, int c, float val) { assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] += val; } void free_image(image m) { if(m.data){ free(m.data); } } image resize_image(image im, int w, int h) { image resized = make_image(w, h, im.c); image part = make_image(w, im.h, im.c); int r, c, k; float w_scale = (float)(im.w - 1) / (w - 1); float h_scale = (float)(im.h - 1) / (h - 1); for(k = 0; k < im.c; ++k){ for(r = 0; r < im.h; ++r){ for(c = 0; c < w; ++c){ float val = 0; if(c == w-1 || im.w == 1){ val = get_pixel(im, im.w-1, r, k); } else { float sx = c*w_scale; int ix = (int) sx; float dx = sx - ix; val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k); } set_pixel(part, c, r, k, val); } } } for(k = 0; k < im.c; ++k){ for(r = 0; r < h; ++r){ float sy = r*h_scale; int iy = (int) sy; float dy = sy - iy; for(c = 0; c < 
w; ++c){ float val = (1-dy) * get_pixel(part, c, iy, k); set_pixel(resized, c, r, k, val); } if(r == h-1 || im.h == 1) continue; for(c = 0; c < w; ++c){ float val = dy * get_pixel(part, c, iy+1, k); add_pixel(resized, c, r, k, val); } } } free_image(part); return resized; } void fill_image(image m, float s) { int i; for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s; } void embed_image(image source, image dest, int dx, int dy) { int x,y,k; for(k = 0; k < source.c; ++k){ for(y = 0; y < source.h; ++y){ for(x = 0; x < source.w; ++x){ float val = get_pixel(source, x,y,k); set_pixel(dest, dx+x, dy+y, k, val); } } } } image letterbox_image(image im, int w, int h) { int new_w = im.w; int new_h = im.h; if (((float)w/im.w) < ((float)h/im.h)) { new_w = w; new_h = (im.h * w)/im.w; } else { new_h = h; new_w = (im.w * h)/im.h; } image resized = resize_image(im, new_w, new_h); image boxed = make_image(w, h, im.c); fill_image(boxed, .5); //int i; //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0; embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2); free_image(resized); return boxed; } image load_image_stb(char *filename, int channels) { int w, h, c; unsigned char *data = stbi_load(filename, &w, &h, &c, channels); if (!data) { fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason()); exit(0); } if(channels) c = channels; int i,j,k; image im = make_image(w, h, c); for(k = 0; k < c; ++k){ for(j = 0; j < h; ++j){ for(i = 0; i < w; ++i){ int dst_index = i + w*j + w*h*k; int src_index = k + c*i + c*w*j; im.data[dst_index] = (float)data[src_index]/255.; } } } free(data); return im; } void save_image_png(image im, const char *name) { char buff[256]; //sprintf(buff, "%s (%d)", name, windows); sprintf(buff, "%s.png", name); unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char)); int i,k; for(k = 0; k < im.c; ++k){ for(i = 0; i < im.w*im.h; ++i){ data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]); } } int 
success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c); free(data); if(!success) fprintf(stderr, "Failed to write image %s\n", buff); } image **load_alphabet() { int i, j; const int nsize = 8; image **alphabets = (image **)calloc(nsize, sizeof(image)); for(j = 0; j < nsize; ++j){ alphabets[j] = (image *)calloc(128, sizeof(image)); for(i = 32; i < 127; ++i){ char buff[256]; sprintf(buff, "labels/%d_%d.png", i, j); //alphabets[j][i] = load_image_color(buff, 0, 0); alphabets[j][i] = load_image_stb(buff, 3); } } return alphabets; } ///////////////////activation begin static inline float stair_activate(float x) { int n = floor(x); if (n%2 == 0) return floor(x/2.); else return (x - n) + floor(x/2.); } static inline float hardtan_activate(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } static inline float linear_activate(float x){return x;} static inline float logistic_activate(float x){return 1./(1. + exp(-x));} static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;} static inline float relu_activate(float x){return x*(x>0);} static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} static inline float relie_activate(float x){return (x>0) ? x : .01*x;} static inline float ramp_activate(float x){return x*(x>0)+.1*x;} static inline float leaky_activate(float x){return (x>0) ? 
x : .1*x;} static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);} static inline float plse_activate(float x) { if(x < -4) return .01 * (x + 4); if(x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } static inline float lhtan_activate(float x) { if(x < 0) return .001*x; if(x > 1) return .001*(x-1) + 1; return x; } static inline float lhtan_gradient(float x) { if(x > 0 && x < 1) return 1; return .001; } static inline float hardtan_gradient(float x) { if (x > -1 && x < 1) return 1; return 0; } static inline float linear_gradient(float x){return 1;} static inline float logistic_gradient(float x){return (1-x)*x;} static inline float loggy_gradient(float x) { float y = (x+1.)/2.; return 2*(1-y)*y; } static inline float stair_gradient(float x) { if (floor(x) == x) return 0; return 1; } static inline float relu_gradient(float x){return (x>0);} static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);} static inline float relie_gradient(float x){return (x>0) ? 1 : .01;} static inline float ramp_gradient(float x){return (x>0)+.1;} static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;} static inline float tanh_gradient(float x){return 1-x*x;} static inline float plse_gradient(float x){return (x < 0 || x > 1) ? 
.01 : .125;} char *get_activation_string(ACTIVATION a) { switch(a){ case LOGISTIC: return "logistic"; case LOGGY: return "loggy"; case RELU: return "relu"; case ELU: return "elu"; case RELIE: return "relie"; case RAMP: return "ramp"; case LINEAR: return "linear"; case TANH: return "tanh"; case PLSE: return "plse"; case LEAKY: return "leaky"; case STAIR: return "stair"; case HARDTAN: return "hardtan"; case LHTAN: return "lhtan"; default: break; } return "relu"; } ACTIVATION get_activation(char *s) { if (strcmp(s, "logistic")==0) return LOGISTIC; if (strcmp(s, "loggy")==0) return LOGGY; if (strcmp(s, "relu")==0) return RELU; if (strcmp(s, "elu")==0) return ELU; if (strcmp(s, "relie")==0) return RELIE; if (strcmp(s, "plse")==0) return PLSE; if (strcmp(s, "hardtan")==0) return HARDTAN; if (strcmp(s, "lhtan")==0) return LHTAN; if (strcmp(s, "linear")==0) return LINEAR; if (strcmp(s, "ramp")==0) return RAMP; if (strcmp(s, "leaky")==0) return LEAKY; if (strcmp(s, "tanh")==0) return TANH; if (strcmp(s, "stair")==0) return STAIR; fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s); return RELU; } float activate(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate(x); case LOGISTIC: return logistic_activate(x); case LOGGY: return loggy_activate(x); case RELU: return relu_activate(x); case ELU: return elu_activate(x); case RELIE: return relie_activate(x); case RAMP: return ramp_activate(x); case LEAKY: return leaky_activate(x); case TANH: return tanh_activate(x); case PLSE: return plse_activate(x); case STAIR: return stair_activate(x); case HARDTAN: return hardtan_activate(x); case LHTAN: return lhtan_activate(x); } return 0; } void activate_array(float *x, const int n, const ACTIVATION a) { int i; for(i = 0; i < n; ++i){ x[i] = activate(x[i], a); } } float gradient(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient(x); case LOGISTIC: return logistic_gradient(x); case LOGGY: return loggy_gradient(x); case RELU: 
return relu_gradient(x); case ELU: return elu_gradient(x); case RELIE: return relie_gradient(x); case RAMP: return ramp_gradient(x); case LEAKY: return leaky_gradient(x); case TANH: return tanh_gradient(x); case PLSE: return plse_gradient(x); case STAIR: return stair_gradient(x); case HARDTAN: return hardtan_gradient(x); case LHTAN: return lhtan_gradient(x); } return 0; } ///////////////////activation end void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); //printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; } } } } } void forward_shortcut_layer(const layer l, network net) { //copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); //shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output); //activate_array(l.output, l.outputs*l.batch, l.activation); int w = l.w; int h = l.h; int c = l.c; float *add = net.layers[l.index].output; float *out = l.output; float *in = net.input; int i,j,k; for(k = 0; k < c; ++k){ for(j = 0; j < h; ++j){ for(i = 0; i < w; ++i){ int index = i + w*(j + h*k ); out[index] = in[index] + add[index]; } } } } layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2) { fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c); layer l; memset(&l,0,sizeof(layer)); l.type = SHORTCUT; l.batch = batch; l.w = w2; l.h = h2; l.c = c2; l.out_w = w; l.out_h = h; l.out_c = c; l.outputs = w*h*c; l.inputs = l.outputs; l.index = index; l.output = (float *)calloc(l.outputs*batch, sizeof(float));; l.forward = forward_shortcut_layer; return l; } int convolutional_out_height(layer l) { return (l.h + 2*l.pad - l.size) / l.stride + 1; } int convolutional_out_width(layer l) { return (l.w + 2*l.pad - l.size) / l.stride + 1; } static size_t get_workspace_size(layer l){ return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float); } void add_bias(float *output, float *biases, int batch, int n, int size) { int i,j,b; for(b = 0; b < batch; ++b){ for(i = 0; i < n; ++i){ for(j = 0; j < size; ++j){ output[(b*n + i)*size + j] += biases[i]; } } } } void scale_bias(float *output, float *scales, int batch, int n, int size) { int i,j,b; for(b = 0; b < 
batch; ++b){ for(i = 0; i < n; ++i){ for(j = 0; j < size; ++j){ output[(b*n + i)*size + j] *= scales[i]; } } } } float im2col_get_pixel(float *im, int height, int width, int channels, int row, int col, int channel, int pad) { row -= pad; col -= pad; if (row < 0 || col < 0 || row >= height || col >= width) return 0; return im[col + width*(row + height*channel)]; } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c,h,w; int height_col = (height + 2*pad - ksize) / stride + 1; int width_col = (width + 2*pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else if(TA && !TB) // gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else if(!TA && TB) // gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else // gemm_tt(M, 
N, K, ALPHA,A,lda, B, ldb,C,ldc); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f); } } } } void forward_batchnorm_layer(layer l, network net)//for conv { normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w); scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w); add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w); } void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding) { // (output_w - 1)*Kernel_stride + Kernel_size = Input_w const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ; const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ; int x, y, of, inf; int m,n; for( of = 0; of < OutFM_num; of++){ for( y = 0; y < output_h; y++) { for( x = 0; x < output_w; x++){ float tmp = 0.0; for(inf = 0;inf < InFM_num; inf++) { int intput_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding; for(m = 0;m < Kernel_size; m++) { for(n = 0;n < Kernel_size; n++) { int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size; bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w); bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h); if(inFM_width&&inFM_height) tmp += Weight[kernel_offset + m*Kernel_size + 
n]*Input[intput_offset + m*Input_w + n]; } } } Output[of*output_w*output_h + y*output_w + x] = tmp; } } } } void forward_convolutional_layer(layer l, network net) { int i, j; fill_cpu(l.outputs*l.batch, 0, l.output, 1); //printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad); //int m = l.n/l.groups; //int k = l.size*l.size*l.c/l.groups; //int n = l.out_w*l.out_h; //for(i = 0; i < l.batch; ++i){ // for(j = 0; j < l.groups; ++j){ // float *a = l.weights + j*l.nweights/l.groups; // float *b = net.workspace; // float *c = l.output + (i*l.groups + j)*n*m; // im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w, // l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); // gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); // } //} int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; float *a = l.weights; float *b = net.workspace; float *c = l.output; im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b); gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); //CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad); if(l.batch_normalize){ forward_batchnorm_layer(l, net); } else { add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w); } activate_array(l.output, l.outputs*l.batch, l.activation); } layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam) { int i; layer l; memset(&l,0,sizeof(layer)); l.type = CONVOLUTIONAL; l.groups = groups; l.h = h; l.w = w; l.c = c; l.n = n; l.binary = binary; l.xnor = xnor; l.batch = batch; l.stride = stride; l.size = size; l.pad = padding; l.batch_normalize = batch_normalize; // l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float)); // l.biases = (float *)calloc(n, sizeof(float)); l.nweights = c/groups*n*size*size; l.nbiases = n; int out_w = convolutional_out_width(l); int out_h = convolutional_out_height(l); l.out_h = out_h; 
l.out_w = out_w; l.out_c = n; l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = l.w * l.h * l.c; // l.output = (float *)calloc(l.batch*l.outputs, sizeof(float)); l.forward = forward_convolutional_layer; if(batch_normalize){ // l.scales = (float *)calloc(n, sizeof(float)); // l.rolling_mean = (float *)calloc(n, sizeof(float)); //l.rolling_variance = (float *)calloc(n, sizeof(float)); } l.workspace_size = get_workspace_size(l); l.activation = activation; fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.); return l; } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for(b = 0; b < batch; ++b){ for(k = 0; k < c; ++k){ for(j = 0; j < h*stride; ++j){ for(i = 0; i < w*stride; ++i){ int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if(forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void forward_upsample_layer(const layer l, network net) { //fill_cpu(l.outputs*l.batch, 0, l.output, 1); //upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output); int c = l.c; int h = l.h; int w = l.w; int stride = l.stride; float *in = net.input; float *out = l.output; int i, j, k; for(k = 0; k < c; ++k){ for(j = 0; j < h*stride; ++j){ for(i = 0; i < w*stride; ++i){ int in_index = k*w*h + (j/stride)*w + i/stride; int out_index = k*w*h*stride*stride + j*w*stride + i; out[out_index] = in[in_index]; } } } } layer make_upsample_layer(int batch, int w, int h, int c, int stride) { layer l; memset(&l,0,sizeof(layer)); l.type = UPSAMPLE; l.batch = batch; l.w = w; l.h = h; l.c = c; l.out_w = w*stride; l.out_h = h*stride; l.out_c = c; if(stride < 0){ stride = -stride; l.reverse=1; l.out_w = w/stride; 
l.out_h = h/stride; } l.stride = stride; l.outputs = l.out_w*l.out_h*l.out_c; l.inputs = l.w*l.h*l.c; l.output = (float *)calloc(l.outputs*batch, sizeof(float));; l.forward = forward_upsample_layer; if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); return l; } void forward_route_layer(const layer l, network net) { int i, j; int offset = 0; for(i = 0; i < l.n; ++i){ int index = l.input_layers[i]; float *input = net.layers[index].output; int input_size = l.input_sizes[i]; copy_cpu(input_size, input, 1, l.output + offset, 1); offset += input_size; } } layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes) { fprintf(stderr,"route "); layer l; memset(&l,0,sizeof(layer)); l.type = ROUTE; l.batch = batch; l.n = n; l.input_layers = input_layers; l.input_sizes = input_sizes; int i; int outputs = 0; for(i = 0; i < n; ++i){ fprintf(stderr," %d", input_layers[i]); outputs += input_sizes[i]; } fprintf(stderr, "\n"); l.outputs = outputs; l.inputs = outputs; // l.output = (float *)calloc(outputs*batch, sizeof(float));; l.forward = forward_route_layer; return l; } static int entry_index(layer l, int batch, int location, int entry) { int n = location / (l.w*l.h); int loc = location % (l.w*l.h); return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc; } void forward_yolo_layer(const layer l, network net) { int i,j,b,t,n; //char line[256]; //FILE *fp3; //char filename[256]; //sprintf(filename, "yolo_layer_%d.txt", l.outputs); //printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename); // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n"); //int x; // for( x = 0; x < l.outputs; x++) //{ // sprintf(line, "%f\n", net.input[x]); // if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n"); // } // fclose(fp3); memcpy(l.output, net.input, 
l.outputs*l.batch*sizeof(float)); for (b = 0; b < l.batch; ++b){ for(n = 0; n < l.n; ++n){ int index = entry_index(l, b, n*l.w*l.h, 0); activate_array(l.output + index, 2*l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, 4); activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC); } } return ; } layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes) { int i; layer l; memset(&l,0,sizeof(layer)); l.type = YOLO; l.n = n; l.total = total; l.batch = batch; l.h = h; l.w = w; l.c = n*(classes + 4 + 1); l.out_w = l.w; l.out_h = l.h; l.out_c = l.c; l.classes = classes; //l.cost = (float *)calloc(1, sizeof(float)); l.biases = (float *)calloc(total*2, sizeof(float)); if(mask) l.mask = mask; else{ l.mask = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ l.mask[i] = i; } } //l.bias_updates = (float *)calloc(n*2, sizeof(float)); l.outputs = h*w*n*(classes + 4 + 1); l.inputs = l.outputs; //l.truths = 90*(4 + 1); //l.delta = (float *)calloc(batch*l.outputs, sizeof(float)); l.output = (float *)calloc(batch*l.outputs, sizeof(float)); for(i = 0; i < total*2; ++i){ l.biases[i] = .5; } l.forward = forward_yolo_layer; fprintf(stderr, "detection\n"); srand(0); return l; } /////////////////praser begin typedef struct{ char *type; list *options; }section; list *read_cfg(char *filename); LAYER_TYPE string_to_layer_type(char * type) { if (strcmp(type, "[shortcut]")==0) return SHORTCUT; if (strcmp(type, "[crop]")==0) return CROP; if (strcmp(type, "[cost]")==0) return COST; if (strcmp(type, "[detection]")==0) return DETECTION; if (strcmp(type, "[region]")==0) return REGION; if (strcmp(type, "[yolo]")==0) return YOLO; if (strcmp(type, "[local]")==0) return LOCAL; if (strcmp(type, "[conv]")==0 || strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL; if (strcmp(type, "[deconv]")==0 || strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL; if (strcmp(type, "[activation]")==0) return ACTIVE; if (strcmp(type, "[logistic]")==0) 
return LOGXENT; if (strcmp(type, "[l2norm]")==0) return L2NORM; if (strcmp(type, "[net]")==0 || strcmp(type, "[network]")==0) return NETWORK; if (strcmp(type, "[crnn]")==0) return CRNN; if (strcmp(type, "[gru]")==0) return GRU; if (strcmp(type, "[lstm]") == 0) return LSTM; if (strcmp(type, "[rnn]")==0) return RNN; if (strcmp(type, "[conn]")==0 || strcmp(type, "[connected]")==0) return CONNECTED; if (strcmp(type, "[max]")==0 || strcmp(type, "[maxpool]")==0) return MAXPOOL; if (strcmp(type, "[reorg]")==0) return REORG; if (strcmp(type, "[avg]")==0 || strcmp(type, "[avgpool]")==0) return AVGPOOL; if (strcmp(type, "[dropout]")==0) return DROPOUT; if (strcmp(type, "[lrn]")==0 || strcmp(type, "[normalization]")==0) return NORMALIZATION; if (strcmp(type, "[batchnorm]")==0) return BATCHNORM; if (strcmp(type, "[soft]")==0 || strcmp(type, "[softmax]")==0) return SOFTMAX; if (strcmp(type, "[route]")==0) return ROUTE; if (strcmp(type, "[upsample]")==0) return UPSAMPLE; return BLANK; } void free_section(section *s) { free(s->type); node *n = s->options->front; while(n){ kvp *pair = (kvp *)n->val; free(pair->key); free(pair); node *next = n->next; free(n); n = next; } free(s->options); free(s); } void parse_data(char *data, float *a, int n) { int i; if(!data) return; char *curr = data; char *next = data; int done = 0; for(i = 0; i < n && !done; ++i){ while(*++next !='\0' && *next != ','); if(*next == '\0') done = 1; *next = '\0'; sscanf(curr, "%g", &a[i]); curr = next+1; } } typedef struct size_params{ int batch; int inputs; int h; int w; int c; int index; int time_steps; network *net; } size_params; layer parse_convolutional(list *options, size_params params) { int n = option_find_int(options, "filters",1); int size = option_find_int(options, "size",1); int stride = option_find_int(options, "stride",1); int pad = option_find_int_quiet(options, "pad",0); int padding = option_find_int_quiet(options, "padding",0); int groups = option_find_int_quiet(options, "groups", 1); if(pad) 
padding = size/2; char *activation_s = option_find_str(options, "activation", "logistic"); ACTIVATION activation = get_activation(activation_s); int batch,h,w,c; h = params.h; w = params.w; c = params.c; batch=params.batch; if(!(h && w && c)) error("Layer before convolutional layer must output image."); int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0); int binary = option_find_int_quiet(options, "binary", 0); int xnor = option_find_int_quiet(options, "xnor", 0); layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam); l.flipped = option_find_int_quiet(options, "flipped", 0); l.dot = option_find_float_quiet(options, "dot", 0); return l; } int *parse_yolo_mask(char *a, int *num) { int *mask = 0; if(a){ int len = strlen(a); int n = 1; int i; for(i = 0; i < len; ++i){ if (a[i] == ',') ++n; } mask = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ int val = atoi(a); mask[i] = val; a = strchr(a, ',')+1; } *num = n; } return mask; } layer parse_yolo(list *options, size_params params) { int classes = option_find_int(options, "classes", 20); int total = option_find_int(options, "num", 1); int num = total; char *a = option_find_str(options, "mask", 0); int *mask = parse_yolo_mask(a, &num); layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes); assert(l.outputs == params.inputs); l.max_boxes = option_find_int_quiet(options, "max",90); l.jitter = option_find_float(options, "jitter", .2); l.ignore_thresh = option_find_float(options, "ignore_thresh", .5); l.truth_thresh = option_find_float(options, "truth_thresh", 1); l.random = option_find_int_quiet(options, "random", 0); a = option_find_str(options, "anchors", 0); if(a){ int len = strlen(a); int n = 1; int i; for(i = 0; i < len; ++i){ if (a[i] == ',') ++n; } for(i = 0; i < n; ++i){ float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',')+1; } } return l; } layer 
parse_shortcut(list *options, size_params params, network *net) { char *l = option_find(options, "from"); int index = atoi(l); if(index < 0) index = params.index + index; int batch = params.batch; layer from = net->layers[index]; layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c); char *activation_s = option_find_str(options, "activation", "linear"); ACTIVATION activation = get_activation(activation_s); s.activation = activation; s.alpha = option_find_float_quiet(options, "alpha", 1); s.beta = option_find_float_quiet(options, "beta", 1); return s; } layer parse_upsample(list *options, size_params params, network *net) { int stride = option_find_int(options, "stride",2); layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride); l.scale = option_find_float_quiet(options, "scale", 1); return l; } layer parse_route(list *options, size_params params, network *net) { char *l = option_find(options, "layers"); int len = strlen(l); if(!l) error("Route Layer must specify input layers"); int n = 1; int i; for(i = 0; i < len; ++i){ if (l[i] == ',') ++n; } int *layers = (int *)calloc(n, sizeof(int)); int *sizes = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ int index = atoi(l); l = strchr(l, ',')+1; if(index < 0) index = params.index + index; layers[i] = index; sizes[i] = net->layers[index].outputs; } int batch = params.batch; layer route_layer = make_route_layer(batch, n, layers, sizes); layer first = net->layers[layers[0]]; route_layer.out_w = first.out_w; route_layer.out_h = first.out_h; route_layer.out_c = first.out_c; for(i = 1; i < n; ++i){ int index = layers[i]; layer next = net->layers[index]; if(next.out_w == first.out_w && next.out_h == first.out_h){ route_layer.out_c += next.out_c; }else{ route_layer.out_h = route_layer.out_w = route_layer.out_c = 0; } } return route_layer; } void softmax(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; 
float largest = -FLT_MAX;
/* Tail of softmax(): subtract the max before exponentiating to avoid
 * overflow, then normalize. */
for(i = 0; i < n; ++i){
    if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
    float e = exp(input[i*stride]/temp - largest/temp);
    sum += e;
    output[i*stride] = e;
}
for(i = 0; i < n; ++i){
    output[i*stride] /= sum;
}
}

/* Apply softmax() independently to each (batch, group) slice. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
        }
    }
}

/* Forward pass of the region (YOLOv2) layer: copy the input through, apply
 * logistic activation to x/y and objectness, and optionally softmax the
 * class scores. Training-time loss computation has been stripped from this
 * (inference-only) port. */
void forward_region_layer(const layer l, network net)
{
    int i,j,b,t,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));

#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            /* x,y coordinates -> logistic */
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            /* objectness -> logistic (unless "background" mode) */
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax){
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
    /* Debug dump of the layer input, kept disabled: */
    // double time1,time2;
    // time1 = what_time_is_it_now();
    // char line[256];
    // FILE *fp3;
    // char filename[256];
    // sprintf(filename, "yolo_region_input_float32_%d.txt", 13*13*425);
    // printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
    // int x;
    // for( x = 0; x < l.outputs; x++)
    // {
    //     sprintf(line, "%f\n", net.input[x]);
    //     if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
    // }
    // fclose(fp3);
    // time2 = what_time_is_it_now();
    // printf("Predicted in %f seconds.\n",time2 - time1);
#endif
    if(!net.train) return;
}

layer make_region_layer(int
batch, int w, int h, int n, int classes, int coords)
{
    /* Construct a region (YOLOv2) layer: n anchors per cell, each predicting
     * coords box params + 1 objectness + classes class scores. */
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + coords + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.biases = (float *)calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30*(l.coords + 1);   /* up to 30 ground-truth boxes */
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;   /* default anchor size; overwritten by cfg "anchors" */
    }

    l.forward = forward_region_layer;

    fprintf(stderr, "detection\n");
    srand(0);

    return l;
}

/* Build a [region] layer from its cfg section (YOLOv2-style head). */
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);

    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);

    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);

    l.softmax = option_find_int(options, "softmax", 0);
    l.background = option_find_int_quiet(options, "background", 0);
    l.max_boxes = option_find_int_quiet(options, "max",30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore",0);

    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);

    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.mask_scale = option_find_float(options, "mask_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match",0);

    /* Softmax-tree / class-map support is compiled out in this port. */
    char *tree_file = option_find_str(options, "tree", 0);
    // if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    // if (map_file) l.map = read_map(map_file);

    char *a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',')+1;
        }
    }
    return l;
}

/* Rearrange a (w,h,c) volume into (w*stride, h*stride, c/stride^2) — or the
 * inverse when forward==0 — used by the reorg/passthrough layer. */
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int b,i,j,k;
    int out_c = c/(stride*stride);

    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h; ++j){
                for(i = 0; i < w; ++i){
                    int in_index  = i + w*(j + h*(k + c*b));
                    int c2 = k % out_c;
                    int offset = k / out_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}

/* Forward pass of the reorg layer. Only the plain (non-flatten, non-extra,
 * non-reverse) path is active in this port; the alternatives are kept
 * commented for reference. */
void forward_reorg_layer(const layer l, network net)
{
    int i;
    //if(l.flatten){
    //    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    //    if(l.reverse){
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 0);
    //    }else{
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 1);
    //    }
    //} else if (l.extra) {
    //    for(i = 0; i < l.batch; ++i){
    //        copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
    //    }
    //} else if (l.reverse){
    //    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    //} else {
    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    //}
}

/* Construct a reorg layer; reverse selects upsampling vs downsampling of the
 * spatial dims by stride (channels change inversely). */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;

    l.outputs =
l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
if(l.extra){
    /* "extra" mode appends extra floats and voids the spatial shape. */
    l.out_w = l.out_h = l.out_c = 0;
    l.outputs = l.inputs + l.extra;
}

if(extra){
    fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
} else {
    fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
}
int output_size = l.outputs * batch;
/* NOTE(review): output buffer allocation is deliberately disabled in this
 * port — presumably provided externally (FPGA memory); confirm. */
//l.output = (float *)calloc(output_size, sizeof(float));

l.forward = forward_reorg_layer;

return l;
}

/* Build a [reorg] layer from its cfg section. */
layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);
    int flatten = option_find_int_quiet(options, "flatten",0);
    int extra = option_find_int_quiet(options, "extra",0);

    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");

    layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
    return layer;
}

/* Forward pass of max pooling: for each output cell take the max over a
 * size x size window (outside-image positions count as -FLT_MAX), recording
 * the argmax index in l.indexes (continues below). */
void forward_maxpool_layer(layer l, network net)
{
    int b,i,j,k,m,n;
    int w_offset = -l.pad;
    int h_offset = -l.pad;

    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int out_index = j + w*(i + h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for(n = 0; n < l.size; ++n){
                        for(m = 0; m < l.size; ++m){
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w);
                            float val = (valid != 0) ? net.input[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ?
val : max;
                        }
                    }
                    l.output[out_index] = max;
                    l.indexes[out_index] = max_i;   /* argmax, used by backprop elsewhere */
                }
            }
        }
    }
}

/* Construct a maxpool layer. Output dims use the darknet "valid + padding"
 * formula. NOTE(review): l.output / l.indexes are not allocated here —
 * presumably provided externally in this port; confirm before calling
 * forward_maxpool_layer. */
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + padding - size)/stride + 1;
    l.out_h = (h + padding - size)/stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    int output_size = l.out_h * l.out_w * l.out_c * batch;

    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

/* Build a [maxpool] layer from its cfg section. */
layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int size = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", size-1);

    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before maxpool layer must output image.");

    layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
    return maxpool_layer;
}

/* Map a policy name string to its enum; unknown names fall back to CONSTANT
 * with a warning. */
learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;
    if (strcmp(s, "exp")==0) return EXP;
    if (strcmp(s, "sigmoid")==0) return SIG;
    if (strcmp(s, "steps")==0) return STEPS;
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}

/* Read the [net] section into the network struct (continues below). */
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
net->notruth = option_find_int_quiet(options, "notruth",0);
/* effective batch = (cfg batch / subdivisions) * time_steps */
net->batch /= subdivs;
net->batch *= net->time_steps;
net->subdivisions = subdivs;
net->random = option_find_int_quiet(options, "random", 0);

net->adam = option_find_int_quiet(options, "adam", 0);
if(net->adam){
    net->B1 = option_find_float(options, "B1", .9);
    net->B2 = option_find_float(options, "B2", .999);
    net->eps = option_find_float(options, "eps", .0000001);
}

net->h = option_find_int_quiet(options, "height",0);
net->w = option_find_int_quiet(options, "width",0);
net->c = option_find_int_quiet(options, "channels",0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
net->center = option_find_int_quiet(options, "center",0);
net->clip = option_find_float_quiet(options, "clip", 0);

/* data-augmentation parameters */
net->angle = option_find_float_quiet(options, "angle", 0);
net->aspect = option_find_float_quiet(options, "aspect", 1);
net->saturation = option_find_float_quiet(options, "saturation", 1);
net->exposure = option_find_float_quiet(options, "exposure", 1);
net->hue = option_find_float_quiet(options, "hue", 0);

if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");

char *policy_s = option_find_str(options, "policy", "constant");
net->policy = get_policy(policy_s);
net->burn_in = option_find_int_quiet(options, "burn_in", 0);
net->power = option_find_float_quiet(options, "power", 4);
if(net->policy == STEP){
    net->step = option_find_int(options, "step", 1);
    net->scale = option_find_float(options, "scale", 1);
} else if (net->policy == STEPS){
    char *l = option_find(options, "steps");
    char *p = option_find(options, "scales");
    if(!l || !p)
error("STEPS policy must have steps and scales in cfg file");

    /* parse parallel comma-separated "steps" and "scales" lists */
    int len = strlen(l);
    int n = 1;
    int i;
    for(i = 0; i < len; ++i){
        if (l[i] == ',') ++n;
    }
    int *steps = (int *)calloc(n, sizeof(int));
    float *scales = (float *)calloc(n, sizeof(float));
    for(i = 0; i < n; ++i){
        int step = atoi(l);
        float scale = atof(p);
        l = strchr(l, ',')+1;
        p = strchr(p, ',')+1;
        steps[i] = step;
        scales[i] = scale;
    }
    net->scales = scales;
    net->steps = steps;
    net->num_steps = n;
} else if (net->policy == EXP){
    net->gamma = option_find_float(options, "gamma", 1);
} else if (net->policy == SIG){
    net->gamma = option_find_float(options, "gamma", 1);
    net->step = option_find_int(options, "step", 1);
} else if (net->policy == POLY || net->policy == RANDOM){
}
net->max_batches = option_find_int(options, "max_batches", 0);
}

/* A section is the network header iff it is [net] or [network]. */
int is_network(section *s)
{
    return (strcmp(s->type, "[net]")==0
            || strcmp(s->type, "[network]")==0);
}

/* Parse a darknet .cfg file into a network: read [net], then build each
 * layer section in order, threading the output shape of each layer into the
 * params of the next (continues below). */
network *parse_network_cfg(char *filename)
{
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    network *net = make_network(sections->size - 1);
    net->gpu_index = -1;
    size_params params;

    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);

    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;

    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer filters size input output\n");
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        //layer l = {0};
        layer l;
        memset(&l,0,sizeof(layer));
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
}else if(lt == UPSAMPLE){ l = parse_upsample(options, params, net); }else if(lt == SHORTCUT){ l = parse_shortcut(options, params, net); }else if(lt == REGION){ l = parse_region(options, params); }else if(lt == YOLO){ l = parse_yolo(options, params); }else if(lt == MAXPOOL){ l = parse_maxpool(options, params); }else if(lt == REORG){ l = parse_reorg(options, params); }else{ fprintf(stderr, "Type not recognized: %s\n", s->type); } l.clip = net->clip; l.truth = option_find_int_quiet(options, "truth", 0); l.onlyforward = option_find_int_quiet(options, "onlyforward", 0); l.stopbackward = option_find_int_quiet(options, "stopbackward", 0); l.dontsave = option_find_int_quiet(options, "dontsave", 0); // l.dontload = option_find_int_quiet(options, "dontload", 0); // l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0); //l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1); l.smooth = option_find_float_quiet(options, "smooth", 0); option_unused(options); net->layers[count] = l; if (l.workspace_size > workspace_size) workspace_size = l.workspace_size; free_section(s); n = n->next; ++count; if(n){ params.h = l.out_h; params.w = l.out_w; params.c = l.out_c; params.inputs = l.outputs; } } free_list(sections); layer out = get_network_output_layer(net); net->outputs = out.outputs; net->output = out.output; //net->input = (float *)calloc(net->inputs*net->batch, sizeof(float)); workspace_size = 0;//donot calloc workspace //if(workspace_size){ // //printf("%ld\n", workspace_size); // net->workspace = (float *)calloc(1, workspace_size); //} return net; } list *read_cfg(char *filename) { FILE *file = fopen(filename, "r"); if(file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); section *current = 0; while((line=fgetl(file)) != 0){ ++ nu; strip(line); switch(line[0]){ case '[': current = (section *)malloc(sizeof(section)); list_insert(options, current); current->options = make_list(); current->type = line; break; 
case '\0':
            case '#':
            case ';':
                /* blank line or comment: discard */
                free(line);
                break;
            default:
                if(!read_option(line, current->options)){
                    fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                    free(line);
                }
                break;
        }
    }
    fclose(file);
    return options;
}

/* Read one convolutional layer's weights from fp in darknet order:
 * biases, [BN scales/mean/variance], weights. Return values of fread are
 * deliberately unchecked (project style). */
void load_convolutional_weights(layer l, FILE *fp)
{
    int num = l.nweights;
    fread(l.biases, sizeof(float), l.n, fp);
    if (l.batch_normalize){
        fread(l.scales, sizeof(float), l.n, fp);
        fread(l.rolling_mean, sizeof(float), l.n, fp);
        fread(l.rolling_variance, sizeof(float), l.n, fp);
    }
    fread(l.weights, sizeof(float), num, fp);
}

/* Load weights for layers [start, min(cutoff, net->n)) from a darknet
 * .weights file. Parses the major/minor/revision header first. */
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);

    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
    printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
    if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        //fread(net->seen, sizeof(size_t), 1, fp);
        /* NOTE(review): "seen" is stored as 8 bytes in the file; on this
         * 32-bit target size_t is 4 bytes (see comment below), so reading
         * twice appears to be a deliberate way to consume all 8 bytes —
         * confirm on 64-bit builds, where this would over-read. */
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
    }else {
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
    int i;
    for(i = start; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}

/* Convenience wrapper: load every layer's weights. */
void load_weights(network *net, char *filename)
{
    load_weights_upto(net, filename, 0, net->n);
}

/////////////////praser end
/////////////////network begin

/* Collect the default data-augmentation arguments from a network
 * (continues below). */
load_args get_base_args(network *net)
{
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.size = net->w;

    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure =
net->exposure;
args.center = net->center;
args.saturation = net->saturation;
args.hue = net->hue;
return args;
}

/* Parse the cfg into a network; weight loading is disabled in this port
 * (weights presumably come from FPGA memory instead — TODO confirm). */
network *load_network(char *cfg, char *weights, int clear)
{
    network *net = parse_network_cfg(cfg);
    //if(weights && weights[0] != 0){
    //    load_weights(net, weights);
    //}
    if(clear) (*net->seen) = 0;
    return net;
}

/* Human-readable name for a layer type enum. */
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case ACTIVE:
            return "activation";
        case LOCAL:
            return "local";
        case DECONVOLUTIONAL:
            return "deconvolutional";
        case CONNECTED:
            return "connected";
        case RNN:
            return "rnn";
        case GRU:
            return "gru";
        case LSTM:
            return "lstm";
        case CRNN:
            return "crnn";
        case MAXPOOL:
            return "maxpool";
        case REORG:
            return "reorg";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case YOLO:
            return "yolo";
        case DROPOUT:
            return "dropout";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        case ROUTE:
            return "route";
        case SHORTCUT:
            return "shortcut";
        case NORMALIZATION:
            return "normalization";
        case BATCHNORM:
            return "batchnorm";
        default:
            break;
    }
    return "none";
}

/* Allocate an empty network with room for n layers. */
network *make_network(int n)
{
    network *net = (network *)calloc(1, sizeof(network));
    net->n = n;
    net->layers = (layer *)calloc(net->n, sizeof(layer));
    net->seen = (size_t *)calloc(1, sizeof(size_t));
    net->t    = (int *)calloc(1, sizeof(int));
    net->cost = (float *)calloc(1, sizeof(float));
    return net;
}

/* Run every layer's forward function in order, feeding each layer's output
 * to the next. Works on a by-value copy so netp's input pointer survives. */
void forward_network(network *netp)
{
    network net = *netp;
    int i;
    for(i = 0; i < net.n; ++i){
        net.index = i;
        layer l = net.layers[i];
        l.forward(l, net);
        net.input = l.output;
        // printf("layer [%d]\n",i);
    }
}

/* Set the softmax temperature on every layer. */
void set_temp_network(network *net, float t)
{
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].temperature = t;
    }
}

/* Set the batch size on the network and every layer. */
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].batch = b;
    }
}

/* Run inference on a single input buffer and return a pointer to the output
 * (continues below). */
float *network_predict(network *net, float *input)
{
    network orig = *net;
    net->input = input;
net->truth = 0;
net->train = 0;
net->delta = 0;
forward_network(net);
float *out = net->output;
*net = orig;   /* restore the caller's network state */
return out;
}

/* Count boxes in a yolo layer whose objectness exceeds thresh. */
int yolo_num_detections(layer l, float thresh)
{
    int i, n;
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        for(n = 0; n < l.n; ++n){
            int obj_index  = entry_index(l, 0, n*l.w*l.h + i, 4);
            if(l.output[obj_index] > thresh){
                ++count;
            }
        }
    }
    return count;
}

/* Total detections across all output layers: yolo layers are thresholded;
 * detection/region layers contribute every anchor cell. */
int num_detections(network *net, float thresh)
{
    int i;
    int s = 0;
    for(i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == YOLO){
            s += yolo_num_detections(l, thresh);
        }
        if(l.type == DETECTION || l.type == REGION){
            s += l.w*l.h*l.n;
        }
    }
    return s;
}

/* Allocate the detection array (with per-detection prob arrays) sized by
 * num_detections(). Caller frees with free_detections(). */
detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer l = net->layers[net->n - 1];
    int i;
    int nboxes = num_detections(net, thresh);
    //printf("num_detections nboxes = %d\n",nboxes);
    if(num) *num = nboxes;
    detection *dets = (detection *)calloc(nboxes, sizeof(detection));
    for(i = 0; i < nboxes; ++i){
        dets[i].prob = (float *)calloc(l.classes, sizeof(float));
    }
    return dets;
}

/* Decode one yolo box: cell-relative sigmoid offsets for x/y, exponential
 * anchor scaling for w/h; all normalized to [0,1] of the network input. */
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / lw;
    b.y = (j + x[index + 1*stride]) / lh;
    b.w = exp(x[index + 2*stride]) * biases[2*n]   / w;
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
    return b;
}

/* Undo letterbox padding: map boxes from network-input coordinates back to
 * the original w x h image (absolute pixels when !relative). */
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x =  (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y =  (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Fill dets with thresholded yolo detections (continues below). */
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int
relative, detection *dets)
{
    int i,j,n;
    float *predictions = l.output;
    // if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int obj_index  = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            if(objectness <= thresh) continue;   /* skip low-confidence anchors */
            int box_index  = entry_index(l, 0, n*l.w*l.h + i, 0);
            dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
            dets[count].objectness = objectness;
            dets[count].classes = l.classes;
            for(j = 0; j < l.classes; ++j){
                int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                float prob = objectness*predictions[class_index];
                dets[count].prob[j] = (prob > thresh) ? prob : 0;
            }
            ++count;
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
    return count;
}

/* Decode one region (YOLOv2) box; like get_yolo_box but normalized by the
 * layer grid rather than the network input. */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / w;
    b.y = (j + x[index + 1*stride]) / h;
    b.w = exp(x[index + 2*stride]) * biases[2*n]   / w;
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
    return b;
}

/* Undo letterbox padding for region-layer boxes (same math as
 * correct_yolo_boxes). */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x =  (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y =  (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Fill dets with every region-layer anchor box. When l.batch==2 the second
 * batch entry holds a horizontally-flipped pass that is averaged in
 * (continues below). */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i,j,n,z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j <
l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + l.coords + 1; ++z){
                        /* mirror-swap the flipped pass, negating x offsets */
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                dets[index].prob[j] = 0;
            }
            int obj_index  = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index  = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if(dets[index].mask){
                for(j = 0; j < l.coords - 4; ++j){
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }

            /* NOTE(review): this outer class_index is shadowed by the one
             * declared inside the loop below and is otherwise unused. */
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(dets[index].objectness){
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                    float prob = scale*predictions[class_index];
                    dets[index].prob[j] = (prob > thresh) ?
prob : 0;
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}

/* Walk the network's output layers and append their detections into dets
 * (yolo layers advance the cursor by their thresholded count; region layers
 * by their full anchor grid). */
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
    int j;
    for(j = 0; j < net->n; ++j){
        layer l = net->layers[j];
        if(l.type == YOLO){
            int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
            dets += count;
        }
        if(l.type == REGION){
            get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += l.w*l.h*l.n;
        }
    }
}

/* Allocate and fill the detection array for the last forward pass.
 * Caller frees with free_detections(). */
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
    detection *dets = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
    return dets;
}

/* Free an array returned by get_network_boxes(). */
void free_detections(detection *dets, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        free(dets[i].prob);
        if(dets[i].mask) free(dets[i].mask);
    }
    free(dets);
}

int network_width(network *net){return net->w;}
int network_height(network *net){return net->h;}

/* Last non-COST layer of the network. */
layer get_network_output_layer(network *net)
{
    int i;
    for(i = net->n - 1; i >= 0; --i){
        if(net->layers[i].type != COST) break;
    }
    return net->layers[i];
}

/* Release all layers and network-owned buffers. */
void free_network(network *net)
{
    int i;
    for(i = 0; i < net->n; ++i){
        free_layer(net->layers[i]);
    }
    free(net->layers);
    if(net->input) free(net->input);
    if(net->truth) free(net->truth);
    free(net);
}

/* Duplicate of get_network_output_layer kept for API compatibility. */
layer network_output_layer(network *net)
{
    int i;
    for(i = net->n - 1; i >= 0; --i){
        if(net->layers[i].type != COST) break;
    }
    return net->layers[i];
}

int network_inputs(network *net)
{
    return net->layers[0].inputs;
}

int network_outputs(network *net)
{
    return network_output_layer(net).outputs;
}

float *network_output(network *net)
{
    return network_output_layer(net).output;
}
//////////////////network end

//////////////////////box begin
/* qsort comparator for NMS: descending by prob of b.sort_class (or by
 * objectness when sort_class < 0). Continues below. */
int nms_comparator(const void *pa, const void *pb)
{
    detection a = *(detection *)pa;
    detection b = *(detection *)pb;
    float diff = 0;
    if(b.sort_class >= 0){
        diff =
a.prob[b.sort_class] - b.prob[b.sort_class];
    } else {
        diff = a.objectness - b.objectness;
    }
    if(diff < 0) return 1;
    else if(diff > 0) return -1;
    return 0;
}

/* Length of the overlap of two 1-D segments given center and width
 * (negative when disjoint). */
float overlap(float x1, float w1, float x2, float w2)
{
    float l1 = x1 - w1/2;
    float l2 = x2 - w2/2;
    float left = l1 > l2 ? l1 : l2;
    float r1 = x1 + w1/2;
    float r2 = x2 + w2/2;
    float right = r1 < r2 ? r1 : r2;
    return right - left;
}

/* Intersection area of two boxes; 0 when they do not overlap. */
float box_intersection(box a, box b)
{
    float w = overlap(a.x, a.w, b.x, b.w);
    float h = overlap(a.y, a.h, b.y, b.h);
    if(w < 0 || h < 0) return 0;
    float area = w*h;
    return area;
}

/* Union area of two boxes. */
float box_union(box a, box b)
{
    float i = box_intersection(a, b);
    float u = a.w*a.h + b.w*b.h - i;
    return u;
}

/* Intersection-over-union of two boxes. */
float box_iou(box a, box b)
{
    return box_intersection(a, b)/box_union(a, b);
}

/* Class-wise non-maximum suppression: move zero-objectness detections to the
 * tail, then for each class sort by that class's prob and zero the prob of
 * any box whose IoU with a higher-scoring box exceeds thresh. */
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total-1;
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;   /* re-examine the element swapped into slot i */
        }
    }
    total = k+1;

    for(k = 0; k < classes; ++k){
        for(i = 0; i < total; ++i){
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}
//////////////////////box end

//////////////////////image begin
/* 6-stop color ramp used for class colors. */
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };

/* Interpolate channel c of the color ramp at position x/max. */
float get_color(int c, int x, int max)
{
    float ratio = ((float)x/max)*5;
    int i = floor(ratio);
    int j = ceil(ratio);
    ratio -= i;
    float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
    //printf("%f\n", r);
    return r;
}

/* Pixel fetch that returns 0 for out-of-bounds coordinates or channels. */
static float get_pixel_extend(image m, int x, int y, int c)
{
    if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
    /*
       if(x < 0) x = 0;
       if(x >= m.w) x = m.w-1;
       if(y < 0) y = 0;
       if(y >= m.h) y = m.h-1;
     */
    if(c < 0 || c >= m.c) return 0;
    return get_pixel(m, x, y, c);
}

/* Multiply source into dest at offset (dx,dy) (continues below). */
void composite_image(image source, image
dest, int dx, int dy)
{
    int x,y,k;
    for(k = 0; k < source.c; ++k){
        for(y = 0; y < source.h; ++y){
            for(x = 0; x < source.w; ++x){
                float val = get_pixel(source, x, y, k);
                float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
                set_pixel(dest, dx+x, dy+y, k, val * val2);
            }
        }
    }
}

/* Return a copy of a surrounded by a white border of the given width. */
image border_image(image a, int border)
{
    image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int x,y,k;
    for(k = 0; k < b.c; ++k){
        for(y = 0; y < b.h; ++y){
            for(x = 0; x < b.w; ++x){
                float val = get_pixel_extend(a, x - border, y - border, k);
                if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
                set_pixel(b, x, y, k, val);
            }
        }
    }
    return b;
}

/* Deep copy of an image (fresh pixel buffer). */
image copy_image(image p)
{
    image copy = p;
    copy.data = (float *)calloc(p.h*p.w*p.c, sizeof(float));
    memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
    return copy;
}

/* Place a and b side by side with horizontal gap dx on a white canvas. */
image tile_images(image a, image b, int dx)
{
    if(a.w == 0) return copy_image(b);
    image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
    fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
    embed_image(a, c, 0, 0);
    composite_image(b, c, a.w + dx, 0);
    return c;
}

/* Render a text label from the pre-rasterized character atlas, with a
 * border. size selects the font scale bucket (0..7). */
image get_label(image **characters, char *string, int size)
{
    size = size/10;
    if(size > 7) size = 7;
    image label = make_empty_image(0,0,0);
    while(*string){
        image l = characters[size][(int)*string];
        image n = tile_images(label, l, -size - 1 + (size+1)/2);
        free_image(label);
        label = n;
        ++string;
    }
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}

/* Blend a label image onto a at row r, column c, tinted with rgb. */
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    int w = label.w;
    int h = label.h;
    if (r - h >= 0) r = r - h;   /* draw above the anchor when there is room */

    int i, j, k;
    for(j = 0; j < h && j + r < a.h; ++j){
        for(i = 0; i < w && i + c < a.w; ++i){
            for(k = 0; k < label.c; ++k){
                float val = get_pixel(label, i, j, k);
                set_pixel(a, i+c, j+r, k, rgb[k] * val);
            }
        }
    }
}

/* Draw a 1-pixel RGB rectangle outline, clamping coordinates to the image
 * (continues below). */
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    //normalize_image(a);
    int i;
    if(x1 < 0) x1 = 0;
    if(x1 >= a.w) x1 = a.w-1;
    if(x2 < 0) x2 = 0;
if(x2 >= a.w) x2 = a.w-1;

if(y1 < 0) y1 = 0;
if(y1 >= a.h) y1 = a.h-1;
if(y2 < 0) y2 = 0;
if(y2 >= a.h) y2 = a.h-1;

/* horizontal edges (channels 0/1/2 = r/g/b planes) */
for(i = x1; i <= x2; ++i){
    a.data[i + y1*a.w + 0*a.w*a.h] = r;
    a.data[i + y2*a.w + 0*a.w*a.h] = r;

    a.data[i + y1*a.w + 1*a.w*a.h] = g;
    a.data[i + y2*a.w + 1*a.w*a.h] = g;

    a.data[i + y1*a.w + 2*a.w*a.h] = b;
    a.data[i + y2*a.w + 2*a.w*a.h] = b;
}
/* vertical edges */
for(i = y1; i <= y2; ++i){
    a.data[x1 + i*a.w + 0*a.w*a.h] = r;
    a.data[x2 + i*a.w + 0*a.w*a.h] = r;

    a.data[x1 + i*a.w + 1*a.w*a.h] = g;
    a.data[x2 + i*a.w + 1*a.w*a.h] = g;

    a.data[x1 + i*a.w + 2*a.w*a.h] = b;
    a.data[x2 + i*a.w + 2*a.w*a.h] = b;
}
}

/* Draw a rectangle outline w pixels thick by nesting draw_box calls. */
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    int i;
    for(i = 0; i < w; ++i){
        draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
    }
}

/* Wrap an existing float buffer as an image (no copy; caller keeps
 * ownership of data). */
image float_to_image(int w, int h, int c, float *data)
{
    image out = make_empty_image(w,h,c);
    out.data = data;
    return out;
}

/* Binarize an image: 1 where pixel > thresh, else 0 (continues below). */
image threshold_image(image im, float thresh)
{
    int i;
    image t = make_image(im.w, im.h, im.c);
    for(i = 0; i < im.w*im.h*im.c; ++i){
        t.data[i] = im.data[i]>thresh ?
1 : 0;
    }
    return t;
}

/* Draw boxes, class labels and optional instance masks for every detection
 * whose best class prob exceeds thresh. */
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    int i,j;

    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;
        /* build a ", "-joined label of all classes above threshold */
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;
            //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
            /* deterministic pseudo-random color per class */
            int offset = class_t*123457 % classes;
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            //width = prob*20+2;

            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);

            /* convert normalized center/size box to clamped pixel corners */
            int left  = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top   = (b.y-b.h/2.)*im.h;
            int bot   = (b.y+b.h/2.)*im.h;

            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;

            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}
//////////////////////image end

//////////////////////////HLS begin
/* Previous (smaller) FPGA tiling configuration, kept for reference: */
//#define MIN(x,y) ((x)<(y)?(x):(y))
//#define S 2
//#define K 3
//
//#define Tn 1
//#define Tm 16
//#define Tr 13
//#define Tc 13
//#define OnChipIB_Width  ((Tc-1)*S+K)
//#define OnChipIB_Height ((Tr-1)*S+K)

#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
/* Active tiling: stride S, kernel K, tile depths/sizes Tn/Tm/Tr/Tc. */
#define S 2
#define K 3

#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
/* On-chip input-buffer footprint implied by the output tile and kernel. */
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define ALPHA_BETA_MAX_NUM 1024
#define INTERWIDTH 20
/* Huge-page granularity used for all /dev/mem mappings (2 MiB). */
#define HPAGESIZE (2*1024*1024)

/*
 * copy_mem2dev: copy `byte_num` bytes from user memory `orig` into physical
 * memory at address `in_buffer`, via a /dev/mem mapping.
 * The mapping length is rounded up to a multiple of HPAGESIZE; the copy
 * itself is exactly byte_num bytes.
 * NOTE(review): open() result is not checked before mmap — confirm /dev/mem
 * is always openable in deployment. munmap() is called with byte_num rather
 * than the mapped RequestByteNum — lengths differ for non-page-multiple
 * copies; verify this is acceptable on the target kernel.
 */
void copy_mem2dev(uint8_t *orig,uint32_t byte_num, unsigned long in_buffer)
{
    int fd = open("/dev/mem", O_RDWR);
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must page
    if(byte_num%(HPAGESIZE)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = ceil(byte_num/(HPAGESIZE*1.0))*(HPAGESIZE);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mappong for absolute memory access failed!\n");
        return;
    }
    memcpy(virtual_addr,orig,byte_num);
    munmap((void *)virtual_addr, byte_num);
    close(fd);
}

/*
 * copy_dev2mem: inverse of copy_mem2dev — copy `byte_num` bytes from
 * physical address `in_buffer` into user memory `dst` via /dev/mem.
 * Same mapping/rounding behavior and the same review caveats as above.
 */
void copy_dev2mem(uint8_t *dst,uint32_t byte_num, unsigned long in_buffer)
{
    int fd = open("/dev/mem", O_RDWR);
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must page
    if(byte_num%(HPAGESIZE)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = ceil(byte_num/(HPAGESIZE*1.0))*(HPAGESIZE);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mappong for absolute memory access failed!\n");
        return;
    }
    memcpy((uint8_t *)dst,virtual_addr,byte_num);
    munmap((void *)virtual_addr, byte_num);
    close(fd);
}

/*
 * copy_file2mem: stream a binary file into physical memory at `in_buffer`,
 * one HPAGESIZE chunk at a time. Returns 0 on success, -1 on malloc failure.
 * NOTE(review): a short final fread is rounded UP to HPAGESIZE before the
 * device copy, so the uninitialized tail of the malloc'd buffer is written
 * to the device — presumably intentional page padding for the DMA region;
 * confirm. fopen failure only prints to stderr and then fread is still
 * called on the NULL FILE* — should bail out. printf uses %d/%X for
 * unsigned long values (offset, in_buffer); should be %lu/%lX.
 */
int copy_file2mem(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    unsigned char *buffer = (unsigned char *)malloc(HPAGESIZE);
    if(buffer==NULL){
        printf("cannot malloc buffer %d byte\n", HPAGESIZE);
        return -1;
    }
    printf("Total Byte Num = %d\n Address 0x%X\n", byte_num, in_buffer);
    FILE *fp;
    if( (fp = fopen(bin_file, "rb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n");
    int rd_num;
    unsigned long offset = 0;
    while(rd_num = fread(buffer, sizeof(unsigned char), HPAGESIZE, fp))
    {
        if(rd_num < HPAGESIZE)
            rd_num = HPAGESIZE;   /* round final chunk up to a full huge page */
        copy_mem2dev(buffer,rd_num, in_buffer+offset);
        printf("rd_num=%d, offset=%d\n", rd_num, offset);
        offset += rd_num;
    }
    printf("copy_file2mem offset=%d\n",offset);
    fclose(fp);
    free(buffer);
    return 0;
}

/*
 * copy_mem2file: dump `byte_num` bytes of physical memory starting at
 * `in_buffer` into a binary file, HPAGESIZE at a time.
 * Returns 0 on success, -1 on malloc failure.
 * NOTE(review): same unchecked fopen and %d-for-unsigned-long caveats as
 * copy_file2mem.
 */
int copy_mem2file(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    void *buffer = malloc(HPAGESIZE);
    if(buffer==NULL){
        printf("cannot malloc buffer %d byte\n", HPAGESIZE);
        return -1;
    }
    FILE *fp;
    if( (fp = fopen(bin_file, "wb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n");
    int x = byte_num;
    int addbyte;
    unsigned long offset = 0;
    /* addbyte = min(remaining, HPAGESIZE); loop ends when remaining hits 0 */
    while(addbyte=((x<HPAGESIZE)?x:(HPAGESIZE)))
    {
        copy_dev2mem((uint8_t *)buffer,addbyte, in_buffer+offset);
        fwrite(buffer , sizeof(unsigned char), addbyte, fp);
        x -= addbyte;
        offset += addbyte;
    }
    printf("copy_mem2file offset=%d\n",offset);
    fclose(fp);
    free(buffer);
    return 0;
}

//double what_time_is_it_now()
//{
//    struct timeval time;
//    if (gettimeofday(&time,NULL)){
//        return 0;
//    }
//    return (double)time.tv_sec + (double)time.tv_usec * .000001;
//}

/*
 * YOLO2_FPGA: program one layer into the YOLOv2 accelerator's AXI-lite
 * control registers, start it, and busy-wait for completion.
 * All addresses/offsets are physical; the accelerator register file is
 * mapped at YOLO2_BASEADDR via /dev/mem. Returns 0 on success, -1 if the
 * register mapping fails.
 *
 * NOTE(review): the idle/done polls use logical && ("(reg >> 2) && 0x1"),
 * which is true whenever ANY bit >= 2 is set, not just the idle bit; the
 * conventional form is bitwise "& 0x1". Works only if no higher status bits
 * are ever set — confirm against the HLS control-register spec.
 */
int YOLO2_FPGA(int In_Address,int Out_Address,int Weight_offset,int Beta_offset,const int InFM_num,const int OutFM_num,
               const int Kernel_size,const int Kernel_stride,
               const int Input_w,const int Input_h,const int Output_w,const int Output_h,
               const int Padding,const bool IsNL,const bool IsBN,
               const int TM,const int TN,const int TR,const int TC,
               const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType,
               int InputQ,int OutputQ,int WeightQ,int BetaQ,unsigned int WEIGHT_BASE,unsigned int BETA_BASE)
{
    /* T2Rate: row-packing factor keyed off the input width (26/13 are the
       late YOLOv2 feature-map widths) — TODO confirm against the HLS core. */
    int T2Rate;
    switch(Input_w)
    {
        case 26: T2Rate = 2; break;
        case 13: T2Rate = 4; break;
        default: T2Rate = 1; break;
    }
    const unsigned char TRow = (TR-1)*Kernel_stride+Kernel_size;
    int trow_loops = (int)ceil(((float)TRow/T2Rate));

    unsigned int ap_idle;
    unsigned int ap_done;
    unsigned long int PhysicalAddress = YOLO2_BASEADDR;
    int map_len = 0x180;   /* span of the accelerator's register file */
    int fd = open("/dev/mem", O_RDWR);
    unsigned char *xbase_address;
    xbase_address = (unsigned char *)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress);
    if(xbase_address == MAP_FAILED)
    {
        perror("1:Init Mapping memory for absolute memory access failed.\n");
        return -1;
    }

    /* Wait until the core reports idle (ap_ctrl bit 2) before reprogramming. */
    while(1)
    {
        ap_idle = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 2) && 0x1);
        if(ap_idle)
            break;
    }

    //#define WEIGHT_BASE (0x10000000)
    //#define BETA_BASE (0x1C25F000)
    /* The core exposes four input and (up to four) output AXI ports; all are
       pointed at the same buffers here. Weight/beta offsets are in 4-byte
       units on the device side. */
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_R_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT1_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT2_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT3_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_R_DATA, Out_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT1_DATA, Out_Address);
    // WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT2_DATA, Out_Address);
    // WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT3_DATA, Out_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHT_DATA, WEIGHT_BASE + Weight_offset*4);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETA_DATA, BETA_BASE + Beta_offset*4);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INFM_NUM_DATA, InFM_num);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTFM_NUM_DATA, OutFM_num);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_SIZE_DATA, Kernel_size);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_STRIDE_DATA, Kernel_stride);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_W_DATA, Input_w);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_H_DATA, Input_h);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_W_DATA, Output_w);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_H_DATA, Output_h);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_PADDING_DATA, Padding);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISNL_DATA, IsNL);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISBN_DATA, IsBN);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TM_DATA, TM);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TN_DATA, TN);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TR_DATA, TR);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TC_DATA, TC);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_MLOOPS_DATA, mLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_NLOOPS_DATA, nLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_RLOOPS_DATA, rLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_CLOOPS_DATA, cLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_LAYERTYPE_DATA, LayerType);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUTQ_DATA, InputQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUTQ_DATA, OutputQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHTQ_DATA, WeightQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETAQ_DATA, BetaQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TROW_LOOPS_DATA, trow_loops);

    // double time1,time2;
    // time1 = what_time_is_it_now();

    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_GIE, 0x0);     /* interrupts off; we poll */
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL, 0x1);//Start

    /* Busy-wait for ap_done (ap_ctrl bit 1). */
    while(1)
    {
        ap_done = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 1) && 0x1);
        if(ap_done)
            break;
    }
    // time2 = what_time_is_it_now();
    // printf("START TO DONE in %f seconds.\n",time2 - time1);

    munmap((void *)xbase_address, map_len);
    close(fd);
    return 0;
}
////////////////////////////////////////////////////////PL v3 end

/*
 * yolov2_hls_ps: run the full YOLOv2 network on the FPGA accelerator.
 * Stages: (1) DMA weights/biases from files into device memory;
 * (2) load per-layer fixed-point Q factors; (3) quantize the input image to
 * Q14 and pack two int16 pixels per 32-bit word; (4) ping-pong each layer's
 * input/output between the top and bottom of the device memory region;
 * (5) dispatch each layer (conv / maxpool / reorg) to YOLO2_FPGA; (6) read
 * back the final feature map, dequantize, and run the region layer on the CPU.
 *
 * NOTE(review): the weight/beta/Q file names, byte counts and the layer
 * index constants (16, 24, 26, 27, 29, 30) are hard-wired to this specific
 * 32-layer YOLOv2 graph — verify against the cfg if the model changes.
 * NOTE(review): maxpool/reorg calls pass NULL for the int parameters
 * Weight_offset/Beta_offset — works but should be 0, not NULL.
 */
void yolov2_hls_ps(network *net, float *input,unsigned int WEIGHT_BASE,unsigned int BETA_BASE,unsigned int MEM_BASE)
{
    int x;
    network orig = *net;
    net->input = input;
    /* Per-conv-layer weight/bias element counts, used to advance the device
       offsets as layers are issued (trailing zeros pad to 32 entries). */
    int weight_offset[32] = {864, 18432, 73728, 8192, 73728, 294912, 32768, 294912, 1179648, 131072, 1179648, 131072, 1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184, 9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024, 512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int offset_index = 0;

    double time1,time2;
    time1 = what_time_is_it_now();
    /* Byte counts are halved: files store 16-bit fixed-point values. */
    copy_file2mem("weightsv2_comb_reorg_ap16.bin",(203767168)/2, WEIGHT_BASE);//->C253D80
    printf("yolov2_w copy ok\n");
    copy_file2mem("biasv2_comb_ap16.bin",(43044+4)/2,BETA_BASE);//->C268724 203812864 = C25F000
    printf("yolov2_b copy ok\n");
    time2 = what_time_is_it_now();
    printf("Predicted in %f seconds.\n",time2 - time1);

    float *region_buf = (float *)calloc(13*13*432,sizeof(float));
    if(!region_buf)
        printf("region_buf calloc fail\n");

/* Total device scratch: enough for the two largest ping-pong feature maps. */
#define MEM_LEN (416*416*32*2+208*208*32*2)
    unsigned int Memory_top = MEM_BASE;
    unsigned int Memory_bottom = MEM_BASE + MEM_LEN;
    int in_ptr[32];
    int out_ptr[32];
/////////////////////
#define QNUM 23
    /* Fixed-point Q (fractional-bit) tables: one input Q per layer boundary,
       one weight/beta Q per conv layer. */
    int inputQ[QNUM+1];
    int weightQ[QNUM];
    int betaQ[QNUM];
    FILE *Qin;
    Qin = fopen("yolov2_ap16_inout_maxQ_24.bin","rb");
    if(!Qin)
        file_error("Qin error 1\n");
    fread(inputQ,sizeof(int),QNUM+1,Qin);
    fclose(Qin);
    /* Layers 20/21 feed the same route; force them to share the smaller Q. */
    if(inputQ[20] < inputQ[21])
        inputQ[21] = inputQ[20];
    else
        inputQ[20] = inputQ[21];
    for(x=0;x<QNUM+1;x++)
        printf("[%2d inputQ]=%2d\n",x,inputQ[x]);
    Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb");
    if(!Qin)
        file_error("Qin error 2\n");
    fread(weightQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    for(x=0;x<QNUM;x++)
        printf("[%2d weightQ]=%2d\n",x,weightQ[x]);
    Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb");
    if(!Qin)
        file_error("Qin error 4\n");
    fread(betaQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    for(x=0;x<QNUM;x++)
        printf("[%2d betaQ]=%2d\n",x,betaQ[x]);
    /* Scale factor to turn the final layer's fixed-point output into float. */
    const double LastLayerOutputPara = pow(2.0,-inputQ[23]);
/////////////////////
#define ROUTE16_LEN (26*26*512*4/2)
#define CONV27_LEN (13*13*256*4/2)
#define CONV24_LEN (13*13*1024*4/2)
    int *input_tmp_mem = (int *)calloc(416*416*32/2,sizeof(int));
    if(!input_tmp_mem)
        file_error("input_tmp_mem error \n");
    int *region_input_buffer = (int *)calloc(13*13*432*4/2,sizeof(int));
    if(!region_input_buffer)
        file_error("region_input_buffer error \n");

    /* Quantize the float input to Q14 int16 and pack two pixels per word. */
    int tmp_in;
    short current_in,next_in;
    bool NextPixelInFlag = true;
    int InputPixelOffset = 0;
    for(x=0;x<416*416*3;x++)//1st Layer input Q14
    {
        if(NextPixelInFlag)
        {
            current_in = (short)(input[x]*pow(2.0,14));
            NextPixelInFlag = false;
        }
        else
        {
            next_in = (short)(input[x]*pow(2.0,14));
            tmp_in = (next_in<<16) + (current_in);
            input_tmp_mem[InputPixelOffset] = tmp_in;
            InputPixelOffset++;
            NextPixelInFlag = true;
        }
    }
    copy_mem2dev((uint8_t *)input_tmp_mem,416*416*3*4/2, MEM_BASE);
    free(input_tmp_mem);

    /* Ping-pong buffer plan: even layers read from the top and write just
       above the bottom; odd layers read that output and write back to the
       top. From layer 18 on, ROUTE16_LEN is reserved at the bottom so the
       layer-16 output survives for the later route/reorg path. */
    for(x=0;x<18;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - net->layers[x].outputs*4/2 ;
        }
        else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    for(x=18;x<25;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs*4/2;
        }else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    /* Hand-placed buffers for the route(16) -> conv(26/27) -> route -> conv(29/30) tail. */
    in_ptr[26] = Memory_bottom - ROUTE16_LEN;
    out_ptr[26] = Memory_top;
    in_ptr[27] = Memory_top;
    out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
    in_ptr[29] = out_ptr[27];
    out_ptr[29] = Memory_top;
    in_ptr[30] = Memory_top;
    out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*3)*4/2;
    if(out_ptr[30]%(4*1024)!=0)
    {
        /* align the final output buffer down to a 4 KiB boundary */
        out_ptr[30] = (out_ptr[30]/(4*1024)-1)*(4*1024);
    }
    in_ptr[31] = out_ptr[30];

    network netp = *net;
    int i;
    int j;
    int woffset = 0;
    int boffset = 0;
    int TR,TC,TM,TN;
    int output_w,output_h;
    int rLoops,cLoops,mLoops,nLoops;
    double time_sum = 0.0;
    int INPUTQ;
    for(i = 0; i < netp.n; ++i)
    {
        netp.index = i;
        layer l = netp.layers[i];
        printf("Layer[%2d]: ",i);
        switch(l.type)
        {
            case CONVOLUTIONAL:
                printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
                output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
                output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
                /* Clamp tile sizes to what the on-chip buffers and the layer allow. */
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = MIN(l.n,Tm);
                TN = MIN(l.c,Tn);
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.n)/TM);
                nLoops = (int)ceil(((float)l.c)/TN);
                INPUTQ = inputQ[offset_index];
                /* Layer 26 reads the routed layer-16 output, so use its Q. */
                if(i==26)
                    INPUTQ = inputQ[12];
                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],woffset/2,boffset/2, l.c,l.n,l.size, l.stride,l.w,l.h,output_w,output_h, l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0, TM,TN,TR,TC, mLoops,nLoops,rLoops,cLoops,0, INPUTQ,inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index], WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                woffset += weight_offset[offset_index];
                boffset += beta_offset[offset_index];
                offset_index++;
                break;
            case MAXPOOL:
                printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                output_w = l.out_h;
                output_h = l.out_w;
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TR = MIN(output_h,TR);
                TC = MIN(output_w,TC);
                TM = MIN(Tm,Tn);
                TM = MIN(l.c,TM);
                TN = TM;
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.c)/TM);
                time1 = what_time_is_it_now();
                /* LayerType 1 = maxpool; no weights/biases, Q passthrough. */
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c, l.size,l.stride,l.w,l.h,output_w,output_h, 0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,1, inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH, WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case REORG:
                printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                /* The reorg layer is remapped to a fixed 52x(32*26) -> 26x(32*13)
                   shuffle with 4 channels — geometry hard-wired to YOLOv2. */
                output_w = 26;
                output_h = 32*13;
                TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = 4;
                TN = TM;
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = 1;
                time1 = what_time_is_it_now();
                /* LayerType 2 = reorg. */
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4, l.stride,l.stride,52,32*26,output_w,output_h, 0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,2, inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH, WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case ROUTE:
                /* Routes are pure buffer aliasing handled by the in_ptr/out_ptr
                   plan above; nothing is dispatched to the FPGA. */
                printf("outputMemory:%8d;route ",l.outputs);
                for(j = 0; j < l.n; ++j){
                    printf(" %d", l.input_layers[j]);
                }
                printf("\n");
                break;
            case REGION:
                // first=time(NULL);
                time1 = what_time_is_it_now();
                printf("outputMemory:%8d;Detection\n",l.outputs);
                /* Read back the packed int16 output and dequantize into floats. */
                copy_dev2mem((uint8_t *)region_input_buffer,13*13*432*4/2, in_ptr[i]);
                bool NextPixelFlag = true;
                int OutputPixelOffset = 0;
                short current_p,next_p,output_p;
                int *Output_ptr = (int *)(region_input_buffer);
                for(j=0;j<l.outputs;j++)
                {
                    if(NextPixelFlag)
                    {
                        int tmp_p = Output_ptr[OutputPixelOffset];
                        OutputPixelOffset++;
                        current_p = tmp_p;        /* low 16 bits */
                        next_p = tmp_p >> 16;     /* high 16 bits, used next pass */
                        output_p = current_p;
                        NextPixelFlag = false;
                    }else
                    {
                        output_p = next_p;
                        NextPixelFlag = true;
                    }
                    region_buf[j] = output_p*LastLayerOutputPara;
                }
                netp.input = region_buf;
                //netp.input = in_ptr[i];
                forward_region_layer(l,netp);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
        }
        netp.input = l.output;
    }
    printf("TIME_SUM Predicted in %f seconds.\n",time_sum);
    *net = orig;
    free(region_input_buffer);
    free(region_buf);
    // free(Memory_buf);
    // free(Weight_buf);
    // free(Alpha_buf);
    // free(Beta_buf);
}
//////////////////////////HLS end #endif
/* ===== file: omp-for-ordered.c ===== */
#include <stdio.h>
#include <omp.h>

/*
 * Demo of the OpenMP `ordered` clause: runs the same 10-iteration loop five
 * times with different schedules and prints which thread executes each
 * iteration. Loops 1-4 use `ordered`, so the bodies inside the ordered
 * region print in iteration order regardless of which thread runs them;
 * loop 0 has no `ordered` and its output may interleave arbitrarily.
 */
int main()
{
#pragma omp parallel
    {
        int i;

        /* Loop 0: static schedule, chunk 1, NO ordered — output order is
           nondeterministic; iterations are dealt round-robin to threads. */
#pragma omp single
        printf("\nloop0, static, chunk 1, no ordered\n");
#pragma omp for schedule(static, 1)
        for(i = 0; i < 10 ; i++)
        {
            printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
        }

        /* Loop 1: ordered with the implementation-default schedule. */
#pragma omp single
        printf("loop1, no schedule\n");
#pragma omp for ordered
        for(i = 0; i < 10 ; i++)
        {
#pragma omp ordered
            {
                printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
            }
        }

        /* Loop 2: ordered + static schedule, default chunk (contiguous blocks). */
#pragma omp single
        printf("\nloop2, static\n");
#pragma omp for schedule(static) ordered
        for(i = 0; i < 10 ; i++)
        {
#pragma omp ordered
            {
                printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
            }
        }

        /* Loop 3: ordered + static, chunk 1 — iterations alternate threads
           but still print in order. */
#pragma omp single
        printf("\nloop3, static, chunk 1\n");
#pragma omp for schedule(static, 1) ordered
        for(i = 0; i < 10 ; i++)
        {
#pragma omp ordered
            {
                printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
            }
        }

        /* Loop 4: ordered + static, chunk 2. */
#pragma omp single
        printf("\nloop4, static, chunk 2\n");
#pragma omp for schedule(static, 2) ordered
        for(i = 0; i < 10 ; i++)
        {
#pragma omp ordered
            {
                printf("Thread %d, i = %d\n",omp_get_thread_num(), i);
            }
        }
    }
    return 0;
}
/* ===== file: GB_unop__identity_fc64_fp64.c ===== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc64_fp64)
// op(A') function:  GB (_unop_tran__identity_fc64_fp64)

// C type:   GxB_FC64_t
// A type:   double
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

// identity on double promoted to double complex: real part = aij, imag = 0

#define GB_ATYPE \
    double

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx and Ax may be aliased; Ab (if non-NULL) is A's bitmap and selects which
// entries are present. Parallelized over nthreads with a static schedule.
GrB_Info GB (_unop_apply__identity_fc64_fp64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c and is specialized
// by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: gbwtgraph.h ===== */
#ifndef GBWTGRAPH_GBWTGRAPH_H
#define GBWTGRAPH_GBWTGRAPH_H

#include <vector>

#include <gbwt/cached_gbwt.h>

#include <sdsl/int_vector.hpp>

#include <gbwtgraph/utils.h>

/*
  gbwtgraph.h: GBWT-based Handle Graph.
*/

namespace gbwtgraph
{

//------------------------------------------------------------------------------

/*
  A HandleGraph implementation that uses GBWT for graph topology and extracts
  sequences from another HandleGraph. Faster sequence access but slower
  graph navigation than in XG. Also supports a version of follow_edges() that
  takes only paths supported by the indexed haplotypes.

  Graph file format versions:

    1  The initial version.
*/

class GBWTGraph : public HandleGraph, public SerializableHandleGraph
{
public:
  GBWTGraph(); // Call deserialize() and set_gbwt() before using the graph.
  GBWTGraph(const GBWTGraph& source);
  GBWTGraph(GBWTGraph&& source);
  ~GBWTGraph();

  /*
    The sequence source must implement the following subset of the
    HandleGraph interface for all nodes in forward orientation:
    - get_handle()
    - get_length()
    - get_sequence()
  */
  template<class Source>
  GBWTGraph(const gbwt::GBWT& gbwt_index, const Source& sequence_source);

  void swap(GBWTGraph& another);
  GBWTGraph& operator=(const GBWTGraph& source);
  GBWTGraph& operator=(GBWTGraph&& source);

  // Serialized file header; `tag` identifies the format, `version` the
  // revision, `nodes` the node count, `flags` reserved option bits.
  struct Header
  {
    std::uint32_t tag, version;
    std::uint64_t nodes;
    std::uint64_t flags;

    constexpr static std::uint32_t TAG = 0x6B3764AF;
    constexpr static std::uint32_t VERSION = Version::GRAPH_VERSION;

    Header();
    void sanitize();
    bool check() const;

    bool operator==(const Header& another) const;
    bool operator!=(const Header& another) const { return !(this->operator==(another)); }
  };

  const gbwt::GBWT*   index;       // topology source; not owned, set via set_gbwt()

  Header              header;
  std::vector<char>   sequences;   // concatenated node sequences (both orientations)
  sdsl::int_vector<0> offsets;     // start offset of each node's sequence, plus sentinel
  sdsl::bit_vector    real_nodes;  // marks GBWT node ids actually present in the graph

  constexpr static size_t CHUNK_SIZE = 1024; // For parallel for_each_handle().
  constexpr static size_t BLOCK_SIZE = 64 * gbwt::MEGABYTE; // For serialization.

  const static std::string EXTENSION; // ".gg"

//------------------------------------------------------------------------------

  /*
    Standard HandleGraph interface.
  */

public:

  // Method to check if a node exists by ID.
  virtual bool has_node(nid_t node_id) const;

  // Look up the handle for the node with the given ID in the given orientation.
  virtual handle_t get_handle(const nid_t& node_id, bool is_reverse = false) const;

  // Get the ID from a handle.
  virtual nid_t get_id(const handle_t& handle) const;

  // Get the orientation of a handle.
  virtual bool get_is_reverse(const handle_t& handle) const;

  // Invert the orientation of a handle (potentially without getting its ID).
  virtual handle_t flip(const handle_t& handle) const;

  // Get the length of a node.
  virtual size_t get_length(const handle_t& handle) const;

  // Get the sequence of a node, presented in the handle's local forward
  // orientation.
  virtual std::string get_sequence(const handle_t& handle) const;

  // Returns one base of a handle's sequence, in the orientation of the
  // handle.
  virtual char get_base(const handle_t& handle, size_t index) const;

  // Returns a substring of a handle's sequence, in the orientation of the
  // handle. If the indicated substring would extend beyond the end of the
  // handle's sequence, the return value is truncated to the sequence's end.
  virtual std::string get_subsequence(const handle_t& handle, size_t index, size_t size) const;

  // Return the number of nodes in the graph.
  virtual size_t get_node_count() const;

  // Return the smallest ID in the graph, or some smaller number if the
  // smallest ID is unavailable. Return value is unspecified if the graph is empty.
  virtual nid_t min_node_id() const;

  // Return the largest ID in the graph, or some larger number if the
  // largest ID is unavailable. Return value is unspecified if the graph is empty.
  virtual nid_t max_node_id() const;

protected:

  // Loop over all the handles to next/previous (right/left) nodes. Passes
  // them to a callback which returns false to stop iterating and true to
  // continue. Returns true if we finished and false if we stopped early.
  virtual bool follow_edges_impl(const handle_t& handle, bool go_left, const std::function<bool(const handle_t&)>& iteratee) const;

  // Loop over all the nodes in the graph in their local forward
  // orientations, in their internal stored order. Stop if the iteratee
  // returns false. Can be told to run in parallel, in which case stopping
  // after a false return value is on a best-effort basis and iteration
  // order is not defined. Returns true if we finished and false if we
  // stopped early.
  virtual bool for_each_handle_impl(const std::function<bool(const handle_t&)>& iteratee, bool parallel = false) const;

public:

  /*
    Reimplementations to avoid weird segfaults / bus errors when calling
    lambdas on clang.
  */

  /// Get the number of edges on the right (go_left = false) or left (go_left
  /// = true) side of the given handle.
  virtual size_t get_degree(const handle_t& handle, bool go_left) const;

  /// Returns true if there is an edge that allows traversal from the left
  /// handle to the right handle.
  virtual bool has_edge(const handle_t& left, const handle_t& right) const;

//------------------------------------------------------------------------------

  /*
    SerializableHandleGraph interface.
  */

public:

  // Serialize the sequences to the ostream.
  virtual void serialize(std::ostream& out) const;

  // Load the sequences from the istream.
  // Call set_gbwt() before using the graph.
  virtual void deserialize(std::istream& in);

  // Set the GBWT index used for graph topology.
  // Call deserialize() before using the graph.
  void set_gbwt(const gbwt::GBWT& gbwt_index);

//------------------------------------------------------------------------------

  /*
    GBWTGraph specific interface.
  */

public:

  // In-place view of the sequence; (start, length).
  typedef std::pair<const char*, size_t> view_type;

  // Convert gbwt::node_type to handle_t.
  static handle_t node_to_handle(gbwt::node_type node) { return handlegraph::as_handle(node); }

  // Convert handle_t to gbwt::node_type.
  static gbwt::node_type handle_to_node(const handle_t& handle) { return handlegraph::as_integer(handle); }

  // Get node sequence as a pointer and length.
  view_type get_sequence_view(const handle_t& handle) const;

  // Determine if the node sequence starts with the given character.
  bool starts_with(const handle_t& handle, char c) const;

  // Determine if the node sequence ends with the given character.
  bool ends_with(const handle_t& handle, char c) const;

  // Convert handle_t to gbwt::SearchState.
  // Note that the state may be empty if the handle does not correspond to a real node.
  gbwt::SearchState get_state(const handle_t& handle) const { return this->index->find(handle_to_node(handle)); }

  // Convert handle_t to gbwt::BidirectionalState.
  // Note that the state may be empty if the handle does not correspond to a real node.
  gbwt::BidirectionalState get_bd_state(const handle_t& handle) const { return this->index->bdFind(handle_to_node(handle)); }

  // Get the search state corresponding to the vector of handles.
  gbwt::SearchState find(const std::vector<handle_t>& path) const;

  // Get the bidirectional search state corresponding to the vector of handles.
  gbwt::BidirectionalState bd_find(const std::vector<handle_t>& path) const;

  // Visit all successor states of this state and call iteratee for the state.
  // Stop and return false if the iteratee returns false.
  // Note that this does not visit empty successor states.
  bool follow_paths(gbwt::SearchState state, const std::function<bool(const gbwt::SearchState&)>& iteratee) const;

  // Visit all predecessor/successor states of this state and call iteratee for the state.
  // Stop and return false if the iteratee returns false.
  // Note that this does not visit empty predecessor/successor states.
  // Each state corresponds to a path. Going backward extends the path left, while going
  // extends it right. When going backward, the state is for the reverse orientation.
  bool follow_paths(gbwt::BidirectionalState state, bool backward, const std::function<bool(const gbwt::BidirectionalState&)>& iteratee) const;

//------------------------------------------------------------------------------

  /*
    Cached GBWTGraph specific interface. Each thread must use a separate cache.
  */

  // Return a cache for the GBWT index. Note: The cache is not thread-safe.
  gbwt::CachedGBWT get_cache() const { return gbwt::CachedGBWT(*(this->index), false); }

  // Convert handle_t to gbwt::SearchState.
  gbwt::SearchState get_state(const gbwt::CachedGBWT& cache, const handle_t& handle) const
  {
    return cache.find(handle_to_node(handle));
  }

  // Convert handle_t to gbwt::BidirectionalState.
  gbwt::BidirectionalState get_bd_state(const gbwt::CachedGBWT& cache, const handle_t& handle) const
  {
    return cache.bdFind(handle_to_node(handle));
  }

  // Get the search state corresponding to the vector of handles.
  gbwt::SearchState find(const gbwt::CachedGBWT& cache, const std::vector<handle_t>& path) const;

  // Get the bidirectional search state corresponding to the vector of handles.
  gbwt::BidirectionalState bd_find(const gbwt::CachedGBWT& cache, const std::vector<handle_t>& path) const;

  // Visit all successor states of this state and call iteratee for the state.
  // Stop and return false if the iteratee returns false.
  // Note that the state may be empty if no path continues to that node.
  bool follow_paths(const gbwt::CachedGBWT& cache, gbwt::SearchState state, const std::function<bool(const gbwt::SearchState&)>& iteratee) const;

  // Visit all predecessor/successor states of this state and call iteratee for the state.
  // Stop and return false if the iteratee returns false.
  // Note that the state may be empty if no path continues to that node.
  // Each state corresponds to a path. Going backward extends the path left, while going
  // extends it right. When going backward, the state is for the reverse orientation.
  bool follow_paths(const gbwt::CachedGBWT& cache, gbwt::BidirectionalState state, bool backward,
                    const std::function<bool(const gbwt::BidirectionalState&)>& iteratee) const;

//------------------------------------------------------------------------------

private:

  // Construction helpers.
  void determine_real_nodes();
  std::vector<handle_t> cache_handles(const std::function<handle_t(gbwt::node_type)>& get_source_handle) const;
  void allocate_arrays(const std::function<size_t(size_t)>& get_source_length);
  void cache_sequences(const std::function<std::string(size_t)>& get_source_sequence);

  void copy(const GBWTGraph& source);

  // Offset of a GBWT node in the internal arrays (nodes are numbered from
  // the index's first node).
  size_t node_offset(gbwt::node_type node) const { return node - this->index->firstNode(); }
  size_t node_offset(const handle_t& handle) const { return this->node_offset(handle_to_node(handle)); }
};

//------------------------------------------------------------------------------

/*
  Traverse all haplotype-consistent windows in the graph and call lambda() for each window.
  Uses multiple threads, so the lambda should be thread-safe.
  A window starts with the sequence of a node and is followed by window_size - 1 bases
  from subsequent nodes. If no extensions are possible, a shorter substring of
  length >= window_size also qualifies as a window.
*/
void for_each_haplotype_window(const GBWTGraph& graph, size_t window_size,
                               const std::function<void(const std::vector<handle_t>&, const std::string&)>& lambda,
                               bool parallel);

//------------------------------------------------------------------------------

// Implementation of the main GBWTGraph constructor.
template<class Source>
GBWTGraph::GBWTGraph(const gbwt::GBWT& gbwt_index, const Source& sequence_source) :
  index(nullptr), header()
{
  // Set GBWT and do sanity checks.
  this->set_gbwt(gbwt_index);

  // Add the sentinel to the offset vector of an empty graph just in case.
  if(this->index->empty())
  {
    this->offsets = sdsl::int_vector<0>(1, 0);
    return;
  }

  // Determine the real nodes and cache the handles. We do these in parallel, as the first
  // is always a slow operation and the second is equally slow in XG.
  // Node n is real, if real_nodes[node_offset(n) / 2] is true.
  std::vector<handle_t> handle_cache;
  #pragma omp parallel
  {
    #pragma omp single
    {
      #pragma omp task
      {
        this->determine_real_nodes();
      }
      #pragma omp task
      {
        handle_cache = this->cache_handles([&sequence_source](gbwt::node_type node) -> handle_t
        {
          return sequence_source.get_handle(gbwt::Node::id(node), false);
        });
      }
    }
  }

  // Allocate space for the sequence and offset arrays.
  this->allocate_arrays([&sequence_source, &handle_cache](size_t offset) -> size_t
  {
    return sequence_source.get_length(handle_cache[offset]);
  });

  // Store the concatenated sequences and their offset ranges for both orientations of all nodes.
  // Given GBWT node n, the sequence is sequences[node_offset(n)] to sequences[node_offset(n + 1) - 1].
  this->cache_sequences([&sequence_source, &handle_cache](size_t offset) -> std::string
  {
    return sequence_source.get_sequence(handle_cache[offset]);
  });
}

//------------------------------------------------------------------------------

} // namespace gbwtgraph

#endif // GBWTGRAPH_GBWTGRAPH_H
/* colorspace.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential colorspace of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ColorspaceType GetImageColorspaceType(const Image *image, ExceptionInfo *exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colorspace=image->colorspace; type=IdentifyImageType(image,exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleMatteType)) colorspace=GRAYColorspace; return(colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. 
% */ static inline void ConvertRGBToCMY(const Quantum red,const Quantum green, const Quantum blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToLab(const Quantum red,const Quantum green, const Quantum blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const Quantum red,const Quantum green, const Quantum blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const Quantum red,const Quantum green, const Quantum blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const Quantum red,const Quantum green, const Quantum blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYPbPr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *U,double *V) { 
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static void ConvertRGBToYDbDr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelGray(q,ClampToQuantum(DecodePixelGamma(gray))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelGray(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { 
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); 
blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.114350*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.212656*R+0.715158*G+0.072186*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (0.212656*(double) i); x_map[i].z=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (0.715158*(double) i); y_map[i].z=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.072186*(double) i); z_map[i].z=(MagickRealType) (0.072186*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; x_map[i].y=(-0.003296)*i; x_map[i].z=0.009410*i; y_map[i].x=0.010566*i; y_map[i].y=(-0.006471)*i; y_map[i].z=(-0.007880)*i; z_map[i].x=0.002052*i; z_map[i].y=0.009768*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,RGBTransformImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    canonical_type;

  MagickBooleanType
    sync_status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Tag the image with the new colorspace and reset the colorimetric state;
    the default gamma assumes a non-linear (encoded) colorspace.
  */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  canonical_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: only the linear variant uses gamma 1.0.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.0;
      canonical_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.0;  /* linear RGB / XYZ family */
    else
      {
        /*
          All other colorspaces are treated perceptually with sRGB primaries
          and the D65 white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  sync_status=SyncImagePixelCache(image,&image->exception);
  image->type=canonical_type;
  return(sync_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *option;

  ImageType
    detected_type;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Images already classified as gray need no pixel scan.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel: start optimistic (bi-level) and demote to grayscale,
    or abandon the scan entirely, as the evidence accumulates.
  */
  detected_type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          detected_type=UndefinedType;
          break;
        }
      if ((detected_type == BilevelType) &&
          (IsMonochromePixel(p) == MagickFalse))
        detected_type=GrayscaleType;
      p++;
    }
    if (detected_type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (detected_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=detected_type;
  if ((detected_type == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n
o c h r o m e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *option;

  ImageType
    detected_type;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan the pixels; a single non-monochrome pixel disqualifies the image.
  */
  detected_type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          detected_type=UndefinedType;
          break;
        }
      p++;
    }
    if (detected_type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (detected_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=detected_type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    result;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Embedded color profiles no longer describe the pixels once they are
    transformed, so discard them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  result=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    result=TransformRGBImage(image,image->colorspace);
  if (result == MagickFalse)
    return(result);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    result=MagickFalse;
  return(result);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.
The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* Convert subtractive CMY components to additive RGB: each channel is the
   complement of its counterpart, scaled to the quantum range. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/* Inverse cone-response transform from LMS to CIE XYZ; the coefficients are
   presumably the inverse of the forward XYZ->LMS matrix used elsewhere in
   this module -- TODO confirm against the forward transform. */
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* LMS -> RGB via the XYZ intermediate space. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Luv -> RGB: L, u, v arrive normalized to [0..1] and are rescaled to their
   natural ranges (L to [0..100]; u and v de-offset) before conversion. */
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Round to the nearest YCC lookup-table index, clamped to [0..1388]. */
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/* Lab -> RGB: a and b arrive biased by 0.5 so they fit in [0..1]. */
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* xyY chromaticity + luminance -> RGB. */
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum
*red,Quantum *green,Quantum *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  /* Recover XYZ from the (x,y) chromaticity and Y luminance;
     PerceptibleReciprocal guards against division by a vanishing y. */
  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* YPbPr -> RGB; Pb and Pr arrive biased by 0.5 so they fit in [0..1]. */
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/* YCbCr -> RGB: identical math to YPbPr here (both use biased chroma). */
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/* YDbDr -> RGB; Db and Dr arrive biased by 0.5. */
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/* YIQ -> RGB; I and Q arrive biased by 0.5. */
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

/* YUV -> RGB; U and V arrive biased by 0.5. */
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)- 0.5805003156565656797*(V-0.5))); *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)- 4.813762626262513e-04*(V-0.5))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 
0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 
0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 
0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 
0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 
0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 
0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug 
!= MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case Rec601LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); gray=EncodePixelGamma(gray); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { 
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=ClampToQuantum(QuantumRange*X); green=ClampToQuantum(QuantumRange*Y); blue=ClampToQuantum(QuantumRange*Z); break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == 
MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert 
PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
cheby.gold.h
#include "common/common.hpp"

// One step of a Chebyshev-polynomial smoother for a 3D 27-point stencil
// operator (miniGMG/HPGMG-style benchmark kernel):
//
//   MA          = (A * Ac) at each interior point, where A is the 27-point
//                 stencil with corner weight 0.03, edge weight 0.1, face
//                 weight 0.46 and center weight -4.26, scaled by h2inv
//                 (presumably 1/h^2 of the grid spacing — TODO confirm
//                 against the harness in common/common.hpp).
//   out[k][j][i] = Ac + c1*(Ac - Ap) + c2*Dinv*(RHS - MA)
//
// Parameters:
//   out_def  - output grid, written at interior points only (ghost layer of
//              width 1 on every face is left untouched)
//   Ac_def   - current iterate
//   Ap_def   - previous iterate (Chebyshev two-term recurrence)
//   RHS_def  - right-hand side of the system
//   Dinv_def - per-point inverse diagonal of the operator
//   h2inv    - stencil scaling factor applied to the off-center terms
//   c1, c2   - Chebyshev recurrence coefficients for this step
//   N        - grid edge length including the one-cell ghost layer
void cheby_step (double *out_def, double* Ac_def, double* Ap_def, double* RHS_def, double* Dinv_def, double h2inv, double c1, double c2, int N) {
  // Reinterpret the flat buffers as N x N x N arrays (VLA pointer casts)
  // so the stencil below can use natural [k][j][i] indexing.
  double (*Ap)[N][N] = (double (*)[N][N])Ap_def;
  double (*Ac)[N][N] = (double (*)[N][N])Ac_def;
  double (*out)[N][N] = (double (*)[N][N])out_def;
  double (*RHS)[N][N] = (double (*)[N][N])RHS_def;
  double (*Dinv)[N][N] = (double (*)[N][N])Dinv_def;
  // Parallelize over the outermost (k) dimension; each (k,j,i) write is
  // independent, so no synchronization is needed.
#pragma omp parallel for
  for (int k = 1; k < N-1; k++) {
    for (int j = 1; j < N-1; j++) {
      // Innermost loop has no loop-carried dependences; tell GCC so it
      // can vectorize despite the pointer casts above.
#pragma GCC ivdep
      for (int i = 1; i < N-1; i++) {
        // MA = stencil application A*Ac: 8 corners (0.03), 12 edges (0.1),
        // 6 faces (0.46), center (-4.26), all scaled by h2inv.
        double MA =
          Ac[k][j][i] -
          h2inv * (0.03 * (Ac[k-1][j-1][i-1] + Ac[k-1][j-1][i+1] + Ac[k-1][j+1][i-1] + Ac[k-1][j+1][i+1] +
                           Ac[k+1][j-1][i-1] + Ac[k+1][j-1][i+1] + Ac[k+1][j+1][i-1] + Ac[k+1][j+1][i+1]) +
                   0.1 * (Ac[k-1][j-1][i] + Ac[k-1][j][i-1] + Ac[k-1][j][i+1] + Ac[k-1][j+1][i] +
                          Ac[k][j-1][i-1] + Ac[k][j-1][i+1] + Ac[k][j+1][i-1] + Ac[k][j+1][i+1] +
                          Ac[k+1][j-1][i] + Ac[k+1][j][i-1] + Ac[k+1][j][i+1] + Ac[k+1][j+1][i]) +
                   0.46 * (Ac[k-1][j][i] + Ac[k][j-1][i] + Ac[k][j][i-1] +
                           Ac[k][j][i+1] + Ac[k][j+1][i] + Ac[k+1][j][i]) -
                   4.26 * Ac[k][j][i]);
        // Chebyshev update: current iterate plus momentum term plus
        // preconditioned residual correction.
        out[k][j][i] = Ac[k][j][i] + c1 * (Ac[k][j][i] - Ap[k][j][i]) +
          c2 * Dinv[k][j][i] * (RHS[k][j][i] - MA);
      }
    }
  }
}

// Reference ("gold") implementation: six chained Chebyshev steps.
// The (current, previous) iterate pair rotates through temp1/temp2/temp3,
// i.e. each step consumes the outputs of the two preceding steps.
// NOTE(review): every step reuses the same (h2inv, c1, c2); a full Chebyshev
// iteration would normally vary c1/c2 per step — presumably the harness
// checks only this fixed-coefficient variant. Temporaries are zero-filled so
// the untouched ghost layer reads as 0 in subsequent steps.
extern "C" void cheby_gold (double* out, double *Ac, double* Ap, double* RHS, double* Dinv, double h2inv, double c1, double c2, int N) {
  double* temp1 = getZero3DArray<double>(N, N, N);
  double* temp2 = getZero3DArray<double>(N, N, N);
  double* temp3 = getZero3DArray<double>(N, N, N);
  cheby_step(temp1, Ac, Ap, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp2, temp1, Ac, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp3, temp2, temp1, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp1, temp3, temp2, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp2, temp1, temp3, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(out, temp2, temp1, RHS, Dinv, h2inv, c1, c2, N);
  // getZero3DArray presumably allocates with new[] — matching delete[].
  delete[] temp1;
  delete[] temp2;
  delete[] temp3;
}
vision.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII SSSSS IIIII OOO N N % % V V I SS I O O NN N % % V V I SSS I O O N N N % % V V I SS I O O N NN % % V IIIII SSSSS IIIII OOO N N % % % % % % MagickCore Computer Vision Methods % % % % Software Design % % Cristy % % September 2014 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include 
"MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/opencl-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/vision.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n n e c t e d C o m p o n e n t s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConnectedComponentsImage() returns the connected-components of the image % uniquely labeled. The returned connected components image colors member % defines the number of unique objects. Choose from 4 or 8-way connectivity. % % You are responsible for freeing the connected components objects resources % with this statement; % % objects = (CCObjectInfo *) RelinquishMagickMemory(objects); % % The format of the ConnectedComponentsImage method is: % % Image *ConnectedComponentsImage(const Image *image, % const size_t connectivity,CCObjectInfo **objects, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o connectivity: how many neighbors to visit, choose from 4 or 8. % % o objects: return the attributes of each unique object. % % o exception: return any errors or warnings in this structure. 
% */ static int CCObjectInfoCompare(const void *x,const void *y) { CCObjectInfo *p, *q; p=(CCObjectInfo *) x; q=(CCObjectInfo *) y; return((int) (q->area-(ssize_t) p->area)); } MagickExport Image *ConnectedComponentsImage(const Image *image, const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception) { #define ConnectedComponentsImageTag "ConnectedComponents/Image" CacheView *component_view, *image_view, *object_view; CCObjectInfo *object; char *c; const char *artifact, *metrics[CCMaxMetrics]; double max_threshold, min_threshold; Image *component_image; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *equivalences; RectangleInfo bounding_box; ssize_t i; size_t size; ssize_t background_id, connect4[2][2] = { { -1, 0 }, { 0, -1 } }, connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } }, dx, dy, first, last, n, step, y; /* Initialize connected components image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (objects != (CCObjectInfo **) NULL) *objects=(CCObjectInfo *) NULL; component_image=CloneImage(image,0,0,MagickTrue,exception); if (component_image == (Image *) NULL) return((Image *) NULL); component_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize connected components equivalences. 
*/ size=image->columns*image->rows; if (image->columns != (size/image->rows)) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception); if (equivalences == (MatrixInfo *) NULL) { component_image=DestroyImage(component_image); return((Image *) NULL); } for (n=0; n < (ssize_t) (image->columns*image->rows); n++) (void) SetMatrixElement(equivalences,n,0,&n); object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object)); if (object == (CCObjectInfo *) NULL) { equivalences=DestroyMatrixInfo(equivalences); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(object,0,MaxColormapSize*sizeof(*object)); for (i=0; i < (ssize_t) MaxColormapSize; i++) { object[i].id=i; object[i].bounding_box.x=(ssize_t) image->columns; object[i].bounding_box.y=(ssize_t) image->rows; GetPixelInfo(image,&object[i].color); } /* Find connected components. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++) { if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; dy=connectivity > 4 ? connect8[n][0] : connect4[n][0]; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel, target; ssize_t neighbor_offset, obj, offset, ox, oy, root; /* Is neighbor an authentic pixel and a different color than the pixel? 
*/ GetPixelInfoPixel(image,p,&pixel); if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) || ((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows)) { p+=GetPixelChannels(image); continue; } neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx* GetPixelChannels(image); GetPixelInfoPixel(image,p+neighbor_offset,&target); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { p+=GetPixelChannels(image); continue; } /* Resolve this equivalence. */ offset=y*image->columns+x; neighbor_offset=dy*image->columns+dx; ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != ox) { ox=obj; status=GetMatrixElement(equivalences,ox,0,&obj); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != oy) { oy=obj; status=GetMatrixElement(equivalences,oy,0,&obj); } if (ox < oy) { status=SetMatrixElement(equivalences,oy,0,&ox); root=ox; } else { status=SetMatrixElement(equivalences,ox,0,&oy); root=oy; } ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,ox,0,&obj); status=SetMatrixElement(equivalences,ox,0,&root); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,oy,0,&obj); status=SetMatrixElement(equivalences,oy,0,&root); } status=SetMatrixElement(equivalences,y*image->columns+x,0,&root); p+=GetPixelChannels(image); } } } /* Label connected components. 
*/ n=0; component_view=AcquireAuthenticCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { ssize_t id, offset; offset=y*image->columns+x; status=GetMatrixElement(equivalences,offset,0,&id); if (id != offset) status=GetMatrixElement(equivalences,id,0,&id); else { id=n++; if (id >= (ssize_t) MaxColormapSize) break; } status=SetMatrixElement(equivalences,offset,0,&id); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x >= (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y >= (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].color.red+=QuantumScale*GetPixelRed(image,p); object[id].color.green+=QuantumScale*GetPixelGreen(image,p); object[id].color.blue+=QuantumScale*GetPixelBlue(image,p); if (image->alpha_trait != UndefinedPixelTrait) object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p); if (image->colorspace == CMYKColorspace) object[id].color.black+=QuantumScale*GetPixelBlack(image,p); object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; SetPixelIndex(component_image,(Quantum) id,q); p+=GetPixelChannels(image); q+=GetPixelChannels(component_image); } if (n > (ssize_t) MaxColormapSize) break; if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; 
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } component_view=DestroyCacheView(component_view); image_view=DestroyCacheView(image_view); equivalences=DestroyMatrixInfo(equivalences); if (n > (ssize_t) MaxColormapSize) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"TooManyObjects"); } background_id=0; min_threshold=0.0; max_threshold=0.0; component_image->colors=(size_t) n; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].color.red/=(QuantumScale*object[i].area); object[i].color.green/=(QuantumScale*object[i].area); object[i].color.blue/=(QuantumScale*object[i].area); if (image->alpha_trait != UndefinedPixelTrait) object[i].color.alpha/=(QuantumScale*object[i].area); if (image->colorspace == CMYKColorspace) object[i].color.black/=(QuantumScale*object[i].area); object[i].centroid.x/=object[i].area; object[i].centroid.y/=object[i].area; max_threshold+=object[i].area; if (object[i].area > object[background_id].area) background_id=i; } max_threshold+=MagickEpsilon; n=(-1); artifact=GetImageArtifact(image,"connected-components:background-id"); if (artifact != (const char *) NULL) background_id=(ssize_t) StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"connected-components:area-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max area threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].area < min_threshold) || (object[i].area >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:keep-colors"); if (artifact != (const char *) NULL) { const char *p; /* Keep selected objects based on color, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickFalse; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:keep-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:keep"); if (artifact != (const char *) NULL) { /* Keep selected objects based on id, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (c=(char *) artifact; *c != '\0'; ) { while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickFalse; } } artifact=GetImageArtifact(image,"connected-components:keep-top"); if (artifact != (const char *) NULL) { CCObjectInfo *top_objects; ssize_t top_ids; /* Keep top objects. 
*/ top_ids=(ssize_t) StringToDouble(artifact,(char **) NULL); top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors, sizeof(*top_objects)); if (top_objects == (CCObjectInfo *) NULL) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(top_objects,object,component_image->colors*sizeof(*object)); qsort((void *) top_objects,component_image->colors,sizeof(*top_objects), CCObjectInfoCompare); for (i=top_ids+1; i < (ssize_t) component_image->colors; i++) object[top_objects[i].id].merge=MagickTrue; top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects); } artifact=GetImageArtifact(image,"connected-components:remove-colors"); if (artifact != (const char *) NULL) { const char *p; /* Remove selected objects based on color, keep others. */ for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickTrue; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:remove-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:remove"); if (artifact != (const char *) NULL) for (c=(char *) artifact; *c != '\0'; ) { /* Remove selected objects based on id, keep others. 
*/ while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:perimeter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max perimeter threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="perimeter"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height+1; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width+1; x++) { Quantum pixels[4]; ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. 
*/ foreground=0; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:circularity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max circularity threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="circularity"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width; x++) { Quantum pixels[4]; ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. */ foreground=0; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); object[i].metric[n]=4.0*MagickPI*object[i].area/(object[i].metric[n]* object[i].metric[n]); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:diameter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max diameter threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="diameter"; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5); if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } } artifact=GetImageArtifact(image,"connected-components:major-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse major threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="major-axis"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse major axis of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse minor threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="minor-axis"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse major axis of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } for (i=0; i < (ssize_t) 
component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image, "connected-components:eccentricity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max eccentricity threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="eccentricy"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }, ellipse_axis = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute eccentricity of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) 
GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); object[i].metric[n]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y* PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x))); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:angle-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse angle threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="angle"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse angle of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=RadiansToDegrees(1.0/2.0*atan(2.0*M11* PerceptibleReciprocal(M20-M02))); if (fabs(M11) < 0.0) { if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0)) object[i].metric[n]+=90.0; } else if (M11 < 0.0) { if (fabs(M20-M02) >= 0.0) { if ((M20-M02) < 0.0) object[i].metric[n]+=90.0; else object[i].metric[n]+=180.0; } } else if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0)) object[i].metric[n]+=90.0; } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } /* Merge any object not within the min and max area threshold. 
*/ component_view=AcquireAuthenticCacheView(component_image,exception); object_view=AcquireVirtualCacheView(component_image,exception); for (i=0; i < (ssize_t) component_image->colors; i++) { ssize_t j; size_t id; if (status == MagickFalse) continue; if ((object[i].merge == MagickFalse) || (i == background_id)) continue; /* keep object */ /* Merge this object. */ for (j=0; j < (ssize_t) component_image->colors; j++) object[j].census=0; bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { ssize_t n; if (status == MagickFalse) continue; j=(ssize_t) GetPixelIndex(component_image,p); if (j == i) for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++) { const Quantum *p; /* Compute area of adjacent objects. */ if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; dy=connectivity > 4 ? connect8[n][0] : connect4[n][0]; p=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx, bounding_box.y+y+dy,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } j=(ssize_t) GetPixelIndex(component_image,p); if (j != i) object[j].census++; } p+=GetPixelChannels(component_image); } } /* Merge with object of greatest adjacent area. 
*/ id=0; for (j=1; j < (ssize_t) component_image->colors; j++) if (object[j].census > object[id].census) id=(size_t) j; object[i].area=0.0; for (y=0; y < (ssize_t) bounding_box.height; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,q) == i) SetPixelIndex(component_image,(Quantum) id,q); q+=GetPixelChannels(component_image); } if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; } } object_view=DestroyCacheView(object_view); component_view=DestroyCacheView(component_view); artifact=GetImageArtifact(image,"connected-components:mean-color"); if (IsStringTrue(artifact) != MagickFalse) { /* Replace object with mean color. */ for (i=0; i < (ssize_t) component_image->colors; i++) component_image->colormap[i]=object[i].color; } (void) SyncImage(component_image,exception); artifact=GetImageArtifact(image,"connected-components:verbose"); if ((IsStringTrue(artifact) != MagickFalse) || (objects != (CCObjectInfo **) NULL)) { /* Report statistics on each unique object. */ for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width=0; object[i].bounding_box.height=0; object[i].bounding_box.x=(ssize_t) component_image->columns; object[i].bounding_box.y=(ssize_t) component_image->rows; object[i].centroid.x=0; object[i].centroid.y=0; object[i].census=object[i].area == 0.0 ? 
0.0 : 1.0; object[i].area=0; } component_view=AcquireVirtualCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns, 1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { size_t id; id=(size_t) GetPixelIndex(component_image,p); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x > (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y > (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; p+=GetPixelChannels(component_image); } } for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].centroid.x=object[i].centroid.x/object[i].area; object[i].centroid.y=object[i].centroid.y/object[i].area; } component_view=DestroyCacheView(component_view); qsort((void *) object,component_image->colors,sizeof(*object), CCObjectInfoCompare); if (objects == (CCObjectInfo **) NULL) { ssize_t j; artifact=GetImageArtifact(image, "connected-components:exclude-header"); if (IsStringTrue(artifact) == MagickFalse) { (void) fprintf(stdout,"Objects ("); artifact=GetImageArtifact(image, "connected-components:exclude-ids"); if (IsStringTrue(artifact) == MagickFalse) (void) fprintf(stdout,"id: "); (void) fprintf(stdout,"bounding-box centroid area mean-color"); for (j=0; j <= n; j++) (void) fprintf(stdout," %s",metrics[j]); (void) fprintf(stdout,"):\n"); } for (i=0; i < (ssize_t) component_image->colors; i++) if (object[i].census > 0.0) { char mean_color[MagickPathExtent]; 
GetColorTuple(&object[i].color,MagickFalse,mean_color); (void) fprintf(stdout," "); artifact=GetImageArtifact(image, "connected-components:exclude-ids"); if (IsStringTrue(artifact) == MagickFalse) (void) fprintf(stdout,"%.20g: ",(double) object[i].id); (void) fprintf(stdout, "%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double) object[i].bounding_box.width,(double) object[i].bounding_box.height,(double) object[i].bounding_box.x,(double) object[i].bounding_box.y, object[i].centroid.x,object[i].centroid.y, GetMagickPrecision(),(double) object[i].area,mean_color); for (j=0; j <= n; j++) (void) fprintf(stdout," %.*g",GetMagickPrecision(), object[i].metric[j]); (void) fprintf(stdout,"\n"); } } } if (objects == (CCObjectInfo **) NULL) object=(CCObjectInfo *) RelinquishMagickMemory(object); else *objects=object; return(component_image); }
3518.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
   Fills A with the deterministic pattern A[i][j] = (i + j) / nj so that
   every run of the benchmark starts from identical input data.  */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Writes every element of B to stderr, with a newline every 20 values. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
	if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Applies a fixed 3x3 convolution stencil to the interior of A and
   stores the result in B; the one-element border of B is never
   written.  */
static
void kernel_conv2d(int ni, int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /* NOTE(review): "private" is an OpenMP clause, not a directive, so a
     bare "#pragma omp private(j)" looks malformed -- presumably the
     variant generator intended "#pragma omp parallel for private(j)".
     Confirm against the generator before changing, since pragma
     placement is the variable under test in this suite.  */
  #pragma omp private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      /* Inner loop: SIMD-vectorized but effectively serial, since
	 num_threads(1) pins the nested parallel region to one thread. */
      #pragma omp parallel for simd num_threads(1)
      for (j = 1; j < _PB_NJ - 1; ++j)
	{
	  B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
		  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
		  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
	}
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from convolution-2d.h). */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
solution.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>

/*
 * Integrand: f(x) = sqrt(4 - x^2) on [0, 2] (quarter circle of radius 2,
 * so the exact integral is pi).  Points past the upper bound contribute
 * nothing -- this guards against rounding pushing x slightly outside
 * the domain and sqrt() receiving a negative argument.
 */
double Func(double x)
{
    return (x > 2) ? 0 : sqrt(4 - x * x);
}

int main(int argc, char **argv)
{
    size_t n = 1000000;   /* number of trapezoid steps */
    int nthreads = 1;     /* requested OpenMP thread count */

    /* Optional overrides: argv[1] = step count, argv[2] = thread count. */
    if (argc > 1)
    {
        n = atoll(argv[1]);
        if (argc > 2)
        {
            nthreads = atoi(argv[2]);
        }
    }

    /* Integration bounds and mesh width. */
    double lo = 0, hi = 2;
    double dx = (hi - lo) / n;

    /* Trapezoidal rule: the two endpoints enter with weight 1/2. */
    double total = (Func(0) + Func(n * dx)) / 2;

    /* Fix the thread count for the parallel region below. */
    omp_set_num_threads(nthreads);

    /* Static schedule in chunks of 10^4 iterations; the reduction
       clause combines per-thread partial sums into `total` safely. */
#pragma omp parallel for schedule(static, 10000) reduction(+: total)
    for (size_t i = 1; i < n; i++)
    {
        total += Func(i * dx);
    }

    total *= dx;

    /* Report the thread count used and the value of the integral. */
    printf("%d %lf\n", nthreads, total);
    return EXIT_SUCCESS;
}
pr27415.c
/* PR middle-end/27415 */
/* { dg-do compile } */

/* The iteration variable of an OpenMP work-sharing loop is predetermined
   private, so naming it in a firstprivate or reduction clause on the
   work-sharing construct itself must be rejected.  Each dg-error
   annotation below states the diagnostic GCC is required to emit;
   test5 shows the valid spelling, where firstprivate appears on the
   enclosing parallel construct instead of the for construct.  */

/* firstprivate of the loop iterator on a separate "omp for": invalid.  */
void
test1 (void)
{
  int i = 0;
#pragma omp parallel
#pragma omp for firstprivate (i)	/* { dg-error "should not be firstprivate" } */
  for (i = 0; i < 10; i++)
    ;
}

/* Same clause on the combined "parallel for": equally invalid.  */
void
test2 (void)
{
  int i = 0;
#pragma omp parallel for firstprivate (i)	/* { dg-error "should not be firstprivate" } */
  for (i = 0; i < 10; i++)
    ;
}

/* reduction of the loop iterator on "omp for": invalid.  */
void
test3 (void)
{
  int i = 0;
#pragma omp parallel
#pragma omp for reduction (+:i)	/* { dg-error "should not be reduction" } */
  for (i = 0; i < 10; i++)
    ;
}

/* reduction of the loop iterator on combined "parallel for": invalid.  */
void
test4 (void)
{
  int i = 0;
#pragma omp parallel for reduction (*:i)	/* { dg-error "should not be reduction" } */
  for (i = 0; i < 10; i++)
    ;
}

/* Valid: firstprivate on the parallel region, not on the loop itself,
   so no diagnostic is expected.  */
void
test5 (void)
{
  int i = 0;
#pragma omp parallel firstprivate (i)
#pragma omp for
  for (i = 0; i < 10; i++)
    ;
}
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  orient_image=(Image *) NULL;
  /*
    Dispatch on the EXIF-style orientation tag: each case produces a new
    image transformed so it displays upright (top-left orientation).
  */
  switch(orientation)
  {
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      /* Already upright (or unknown): return an unmodified copy. */
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case TopRightOrientation:
    {
      /* Mirrored horizontally. */
      orient_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      /* Upside down. */
      orient_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      /* Mirrored vertically. */
      orient_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      /* Mirrored along the top-left/bottom-right diagonal. */
      orient_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      orient_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      /* Mirrored along the top-right/bottom-left diagonal. */
      orient_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      orient_image=RotateImage(image,270.0,exception);
      break;
    }
  }
  /* The result is upright by construction; record that in its metadata. */
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. 
% % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; register ssize_t i; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); cmyk_images=NewImageList(); for (i=0; i < (ssize_t) GetImageListLength(images); i+=4) { cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace); image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p))); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) 
break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; 
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(cmyk_view); for (x=0; x < (ssize_t) images->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange- GetPixelIntensity(images,p))); p++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); AppendImageToList(&cmyk_images,cmyk_image); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. 
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The crop is interpreted relative to the virtual canvas (image->page);
    fall back to the pixel bounds when no page geometry is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Clip the requested rectangle against the bounding box, tracking the
    offset of the surviving region in page.x/page.y.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t)
      image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t)
      image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each destination row straight out of the source.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,crop_image,1,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict crop_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) CopyMagickMemory(crop_indexes,indexes,(size_t)
        crop_image->columns*sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C r o p  I m a g e  T o  T i l e s                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const char *crop_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  Tile edges are computed in floating
        point and rounded so the tiles exactly cover the (possibly inset)
        region.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is smaller than the requested tile: return a plain clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E x c e r p t  I m a g e                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict excerpt_indexes, *restrict indexes; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q)); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view); if (excerpt_indexes != (IndexPacket *) NULL) (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t) excerpt_image->columns*sizeof(*excerpt_indexes)); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ExcerptImage) 
#endif proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; /* Allocate extent image. 
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(extent_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /*
    A translucent background forces the matte channel on so the padding
    keeps its transparency.
  */
  if (extent_image->background_color.opacity != OpaqueOpacity)
    extent_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(extent_image);
  /*
    Negated offsets position the original image inside the new canvas.
  */
  (void) CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F l i p  I m a g e                                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: each source row y is written to destination row rows-y-1.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flip_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the virtual-canvas offset as well so page geometry stays in sync.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F l o p  I m a g e                                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
*/
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Write the destination row right-to-left: start one past the end and
      pre-decrement while the source advances left-to-right.
    */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the virtual-canvas offset as well so page geometry stays in sync.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o l l  I m a g
e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyImageRegion(Image *destination, const Image *source,const size_t columns,const size_t rows, const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy, ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict destination_indexes; register PixelPacket *restrict q; /* Transfer scanline. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source_view); (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p)); if (indexes != (IndexPacket *) NULL) { destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); if (destination_indexes != (IndexPacket *) NULL) (void) CopyMagickMemory(destination_indexes,indexes,(size_t) columns*sizeof(*indexes)); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. 
*/ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case StaticGravity: case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical 
(MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, image->columns,1,exception); if ((y < 0) || (y >= (ssize_t) splice_image->rows)) continue; q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; 
if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. And finally if the crop succeeds and the resize failed, you will get a cropped image, as well as a 'false' or 'failed' report. 
This function should probably be deprecated in favor of direct calls
% % The format of the TransformImage method is: % % MagickBooleanType TransformImages(Image **image, % const char *crop_geometry,const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ MagickExport MagickBooleanType TransformImages(Image **images, const char *crop_geometry,const char *image_geometry) { Image *image, **image_list, *transform_images; MagickStatusType status; register ssize_t i; assert(images != (Image **) NULL); assert((*images)->signature == MagickSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); image_list=ImageListToArray(*images,&(*images)->exception); if (image_list == (Image **) NULL) return(MagickFalse); status=MagickTrue; transform_images=NewImageList(); for (i=0; image_list[i] != (Image *) NULL; i++) { image=image_list[i]; status&=TransformImage(&image,crop_geometry,image_geometry); AppendImageToList(&transform_images,image); } *images=transform_images; image_list=(Image **) RelinquishMagickMemory(image_list); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict transpose_indexes, *restrict indexes; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q)); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view); if (transpose_indexes != (IndexPacket *) NULL) (void) CopyMagickMemory(transpose_indexes,indexes,(size_t) image->columns*sizeof(*transpose_indexes)); } if 
(SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,TransposeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register IndexPacket *restrict transverse_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y- 1),0,1,transverse_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } q+=image->columns; for (x=0; x < (ssize_t) image->columns; x++) *--q=(*p++); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view); if (transverse_indexes != (IndexPacket *) NULL) for (x=0; x < (ssize_t) image->columns; x++) 
SetPixelIndex(transverse_indexes+image->columns-x-1, GetPixelIndex(indexes+x)); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransverseImage) #endif proceed=SetImageProgress(image,TransverseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the TrimImage method is: % % Image *TrimImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the smallest rectangle enclosing the non-border pixels; the trim
    is then simply a crop to that rectangle.
  */
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width == 0) || (geometry.height == 0))
    {
      Image
        *crop_image;

      /*
        The bounding box is empty (e.g. a uniform image): return a 1x1
        transparent canvas instead of failing.  NOTE(review): the (-1,-1)
        page offset appears to serve as a "completely trimmed" marker --
        confirm against callers before relying on it.
      */
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=image->page;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      return(crop_image);
    }
  /*
    Translate the bounding box into page coordinates before cropping so the
    result honors any existing page offset.
  */
  geometry.x+=image->page.x;
  geometry.y+=image->page.y;
  return(CropImage(image,&geometry,exception));
}
shallow2d.c
#include <string.h> #include <math.h> #include <omp.h> //ldoc on /** * ## Implementation * * The actually work of computing the fluxes and speeds is done * by local (`static`) helper functions that take as arguments * pointers to all the individual fields. This is helpful to the * compilers, since by specifying the `restrict` keyword, we are * promising that we will not access the field data through the * wrong pointer. This lets the compiler do a better job with * vectorization. */ static const float g = 9.8; static void shallow2dv_flux(float* restrict fh, float* restrict fhu, float* restrict fhv, float* restrict gh, float* restrict ghu, float* restrict ghv, const float* restrict h, const float* restrict hu, const float* restrict hv, float g, int ncell) { memcpy(fh, hu, ncell * sizeof(float)); memcpy(gh, hv, ncell * sizeof(float)); for (int i = 0; i < ncell; ++i) { float hi = h[i], hui = hu[i], hvi = hv[i]; float inv_h = 1.0f/hi; fhu[i] = hui*hui*inv_h + (0.5f*g)*hi*hi; fhv[i] = hui*hvi*inv_h; ghu[i] = hui*hvi*inv_h; ghv[i] = hvi*hvi*inv_h + (0.5f*g)*hi*hi; } } static void shallow2dv_speed(float* restrict cxy, const float* restrict h, const float* restrict hu, const float* restrict hv, float g, int ncell) { static float max_cx[200]; static float max_cy[200]; int n; #pragma omp parallel { n = omp_get_num_threads(); int id = omp_get_thread_num(); for (int i = ncell * id / n; i < ncell * (id + 1) / n; i++) { float hi = h[i]; float inv_hi = 1.0f/h[i]; float root_gh = sqrtf(g * hi); float cxi = fabsf(hu[i] * inv_hi) + root_gh; float cyi = fabsf(hv[i] * inv_hi) + root_gh; if (max_cx[id] < cxi) max_cx[id] = cxi; if (max_cy[id] < cyi) max_cy[id] = cyi; } } cxy[0] = max_cx[0]; cxy[1] = max_cy[0]; for (int i = 1; i < n; i++) { if (cxy[0] < max_cx[i]) cxy[0] = max_cx[i]; if (cxy[1] < max_cy[i]) cxy[1] = max_cy[i]; } /* float cx = cxy[0]; float cy = cxy[1]; for (int i = 0; i < ncell; ++i) { float hi = h[i]; float inv_hi = 1.0f/h[i]; float root_gh = sqrtf(g * hi); float cxi = 
fabsf(hu[i] * inv_hi) + root_gh; float cyi = fabsf(hv[i] * inv_hi) + root_gh; if (cx < cxi) cx = cxi; if (cy < cyi) cy = cyi; } cxy[0] = cx; cxy[1] = cy; */ } void shallow2d_flux(float* FU, float* GU, const float* U, int ncell, int field_stride) { shallow2dv_flux(FU, FU+field_stride, FU+2*field_stride, GU, GU+field_stride, GU+2*field_stride, U, U +field_stride, U +2*field_stride, g, ncell); } void shallow2d_speed(float* cxy, const float* U, int ncell, int field_stride) { shallow2dv_speed(cxy, U, U+field_stride, U+2*field_stride, g, ncell); }
DiracMatrix.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_DIRAC_MATRIX_H
#define QMCPLUSPLUS_DIRAC_MATRIX_H

#include "Numerics/Blasf.h"
#include <OhmmsPETE/OhmmsMatrix.h>
#include "Numerics/BlasThreadingEnv.h"
#include <type_traits/scalar_traits.h>
#include "simd/simd.hpp"

// NOTE(review): `restrict` is not standard C++; presumably the build maps it
// to a compiler extension (__restrict__) — confirm against the project config.

namespace qmcplusplus
{
/** LU factorization of a real single-precision matrix (LAPACK sgetrf).
 * On exit `a` holds the LU factors in-place and `piv` the pivot indices.
 */
inline void Xgetrf(int n, int m, float* restrict a, int lda, int* restrict piv)
{
  int status;
  sgetrf(n, m, a, lda, piv, status);
}

/** inversion of a real single-precision matrix after LU factorization (sgetri).
 * With lwork == -1 this is a workspace-size query: the optimal size is
 * returned in work[0] and the matrix is left untouched.
 */
inline void Xgetri(int n, float* restrict a, int lda, int* restrict piv, float* restrict work, int& lwork)
{
  int status;
  sgetri(n, a, lda, piv, work, lwork, status);
}

/** LU factorization of a complex single-precision matrix (cgetrf). */
inline void Xgetrf(int n, int m, std::complex<float>* restrict a, int lda, int* restrict piv)
{
  int status;
  cgetrf(n, m, a, lda, piv, status);
}

/** inversion of a complex single-precision matrix after lu factorization (cgetri)*/
inline void Xgetri(int n, std::complex<float>* restrict a, int lda, int* restrict piv, std::complex<float>* restrict work, int& lwork)
{
  int status;
  cgetri(n, a, lda, piv, work, lwork, status);
}

/** LU factorization of a real double-precision matrix (dgetrf). */
inline void Xgetrf(int n, int m, double* restrict a, int lda, int* restrict piv)
{
  int status;
  dgetrf(n, m, a, lda, piv, status);
}

/** inversion of a real double-precision matrix after LU factorization (dgetri). */
inline void Xgetri(int n, double* restrict a, int lda, int* restrict piv, double* restrict work, int& lwork)
{
  int status;
  dgetri(n, a, lda, piv, work, lwork, status);
}

/** LU factorization of a complex double-precision matrix (zgetrf). */
inline void Xgetrf(int n, int m, std::complex<double>* restrict a, int lda, int* restrict piv)
{
  int status;
  zgetrf(n, m, a, lda, piv, status);
}

/** inversion of a std::complex<double> matrix after lu factorization (zgetri)*/
inline void Xgetri(int n, std::complex<double>* restrict a, int lda, int* restrict piv, std::complex<double>* restrict work, int& lwork)
{
  int status;
  zgetri(n, a, lda, piv, work, lwork, status);
}

/** Out-of-place transpose of an n x n submatrix with leading dimension lda,
 * converting element type TIN -> TOUT.
 * NOTE(review): the name is presumably a typo for "TransposeSquare"; it is
 * kept because renaming would break existing callers.
 */
template<typename TIN, typename TOUT>
inline void TansposeSquare(const TIN* restrict in, TOUT* restrict out, size_t n, size_t lda)
{
#pragma omp simd
  for (size_t i = 0; i < n; ++i)
    for (size_t j = 0; j < n; ++j)
      out[i * lda + j] = in[i + j * lda];
}

/** log|det| from the diagonal of a real LU factorization.
 * `diag` holds the U diagonal, `pivot` the LAPACK pivots (1-based).
 * The sign of the determinant is tracked through pivot swaps and negative
 * diagonal entries; `phase` is set to 0 for a positive determinant and to
 * M_PI for a negative one.  Returns sum(log|diag[i]|).
 */
template<typename T>
inline T computeLogDet(const T* restrict diag, int n, const int* restrict pivot, T& phase)
{
  T logdet(0);
  int sign_det = 1;
  for (size_t i = 0; i < n; i++)
  {
    // pivot[i] == i+1 means no row swap at step i (LAPACK is 1-based)
    sign_det *= (pivot[i] == i + 1) ? 1 : -1;
    sign_det *= (diag[i] > 0) ? 1 : -1;
    logdet += std::log(std::abs(diag[i]));
  }
  phase = (sign_det > 0) ? T(0) : M_PI;
  return logdet;
}

/** log|det| from the diagonal of a complex LU factorization.
 * Accumulates the argument of each diagonal element (each pivot swap
 * contributes a factor -1, i.e. a phase of pi), then wraps `phase` into
 * [0, 2*pi).  Returns 0.5 * sum(log|diag[i]|^2).
 */
template<typename T>
inline T computeLogDet(const std::complex<T>* restrict diag, int n, const int* restrict pivot, T& phase)
{
  T logdet(0);
  phase = T(0);
  for (size_t i = 0; i < n; i++)
  {
    phase += std::arg(diag[i]);
    if (pivot[i] != i + 1)
      phase += M_PI;
    logdet += std::log(diag[i].real() * diag[i].real() + diag[i].imag() * diag[i].imag());
    //slightly smaller error with the following
    //  logdet+=2.0*std::log(std::abs(x[ii]);
  }
  // wrap the accumulated phase into [0, 2*pi)
  constexpr T one_over_2pi = T(1) / TWOPI;
  phase -= std::floor(phase * one_over_2pi) * TWOPI;
  return 0.5 * logdet;
}

/** Helper that inverts the transpose of a matrix via LAPACK LU
 * factorization, returning log|det| and the determinant phase.
 * T_FP is the (possibly higher) precision used internally; T is the
 * caller-facing element type.  The inversion is always performed in T_FP.
 */
template<typename T_FP, typename T = T_FP>
class DiracMatrix
{
  typedef typename scalar_traits<T>::real_type real_type;
  typedef typename scalar_traits<T_FP>::real_type real_type_fp;
  // LAPACK workspace and pivot storage, reused across calls
  aligned_vector<T_FP> m_work;
  aligned_vector<int> m_pivot;
  int Lwork;
  /// scratch space used for mixed precision
  Matrix<T_FP> psiM_fp;
  /// LU diagonal elements
  aligned_vector<T_FP> LU_diag;

  /// reset internal work space: query the optimal getri workspace size
  /// (lwork == -1 convention) and resize the scratch buffers for `lda`.
  inline void reset(T_FP* invMat_ptr, const int lda)
  {
    m_pivot.resize(lda);
    Lwork = -1;
    T_FP tmp;
    real_type_fp lw;
    // workspace query only; the matrix content is not modified
    Xgetri(lda, invMat_ptr, lda, m_pivot.data(), &tmp, Lwork);
    convert(tmp, lw);
    Lwork = static_cast<int>(lw);
    m_work.resize(Lwork);
    LU_diag.resize(lda);
  }

public:
  DiracMatrix() : Lwork(0) {}

  /** compute the inverse of the transpose of matrix A
   * assume precision T_FP >= T, do the inversion always with T_FP
   * @param amat input matrix (n x n, read-only)
   * @param invMat receives inverse(transpose(amat))
   * @param LogDet receives log|det(amat)|
   * @param Phase receives the determinant phase (0 or pi for real types)
   */
  inline void invert_transpose(const Matrix<T>& amat, Matrix<T>& invMat, real_type& LogDet, real_type& Phase)
  {
    // pin the BLAS thread count for the nested-threading environment
    BlasThreadingEnv knob(getNextLevelNumThreads());
    const int n = invMat.rows();
    const int lda = invMat.cols();
    T_FP* invMat_ptr(nullptr);
#if !defined(MIXED_PRECISION)
    // full precision: transpose directly into the output buffer
    simd::transpose(amat.data(), n, amat.cols(), invMat.data(), n, invMat.cols());
    invMat_ptr = invMat.data();
#else
    // mixed precision: transpose (and up-convert) into the T_FP scratch
    psiM_fp.resize(n,lda);
    simd::transpose(amat.data(), n, amat.cols(), psiM_fp.data(), n, psiM_fp.cols());
    invMat_ptr = psiM_fp.data();
#endif
    // (re)size workspace if the problem grew since the last call
    if (Lwork < lda)
      reset(invMat_ptr, lda);
    // in-place LU factorization, then capture the U diagonal for the logdet
    Xgetrf(n, n, invMat_ptr, lda, m_pivot.data());
    for(int i=0; i<n; i++)
      LU_diag[i] = invMat_ptr[i*lda+i];
    real_type_fp Phase_tmp;
    LogDet = computeLogDet(LU_diag.data(), n, m_pivot.data(), Phase_tmp);
    Phase = Phase_tmp;
    // in-place inversion from the LU factors
    Xgetri(n, invMat_ptr, lda, m_pivot.data(), m_work.data(), Lwork);
#if defined(MIXED_PRECISION)
    // down-convert the result back to the caller's precision
    invMat = psiM_fp;
#endif
  }
};
} // namespace qmcplusplus

#endif // QMCPLUSPLUS_DIRAC_MATRIX_H
gi_maxmin_vertex_labeling.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef MAXMIN_VERTEX_LABELING_H #define MAXMIN_VERTEX_LABELING_H #include <omp.h> #include "gi_basic_types.h" #include "gi_regular_grid_3d.h" #include "gi_topological_regular_grid_3d.h" #include "gi_labeling.h" #include "gi_array_index_partition.h" namespace GInt { template <class MeshType, class GridFuncType> class RegularGridMaxMinVertexLabeling3D { protected: BYTE_TYPE* byte_tmp_max_ids; BYTE_TYPE* byte_tmp_min_ids; const RegularGrid3D* mGrid; MeshType* mMesh; GridFuncType* mGridFunc; public: RegularGridMaxMinVertexLabeling3D(MeshType* mesh, GridFuncType* gridFunc) : mMesh(mesh), mGridFunc(gridFunc) { byte_tmp_max_ids = NULL; byte_tmp_min_ids = NULL; mGrid = mGridFunc->GetGrid(); } ~RegularGridMaxMinVertexLabeling3D() { if (byte_tmp_max_ids != NULL) delete byte_tmp_max_ids; if (byte_tmp_min_ids != NULL) delete byte_tmp_min_ids; } void HACK_init() { byte_tmp_max_ids = new BYTE_TYPE[mMesh->numCells()]; byte_tmp_min_ids = new BYTE_TYPE[mMesh->numCells()]; } BYTE_TYPE GetUncompressedMaxVal(INDEX_TYPE cellid) const { return byte_tmp_max_ids[cellid]; } BYTE_TYPE GetUncompressedMinVal(INDEX_TYPE cellid) const { return byte_tmp_min_ids[cellid]; } void SetUncompressedMaxVal(INDEX_TYPE cellid, BYTE_TYPE val) { byte_tmp_max_ids[cellid] = val; } void SetUncompressedMinVal(INDEX_TYPE cellid, BYTE_TYPE val) { byte_tmp_min_ids[cellid] = val; } INDEX_TYPE Cell2HighestVertex(INDEX_TYPE cellid) { return mMesh->UncompressByteToVertexOffset(cellid, byte_tmp_max_ids[cellid]); } INDEX_TYPE Cell2LowestVertex(INDEX_TYPE cellid) { return mMesh->UncompressByteToVertexOffset(cellid, byte_tmp_min_ids[cellid]); } bool Before(INDEX_TYPE a, INDEX_TYPE b) const { return mGridFunc->IsGreater(b, a); } struct ValInd { float val; INDEX_TYPE ind; ValInd() {} ValInd(float v, INDEX_TYPE i) 
: val(v), ind(i) {} }; void do_fastest_line(int X, const INDEX_TYPE base_offset, float* values, ValInd* o_max_row, ValInd* o_min_row) { // X = dim of line // float* values //out = float*, int* int dX = 2 * X - 1; // do 0th index //ValInd old(values[0], base_offset); float tmpold = values[0]; o_max_row[0].ind = 0 + base_offset; o_max_row[0].val = tmpold; o_min_row[0].ind = 0 + base_offset; o_min_row[0].val = tmpold; // now do rest! for (int i = 1; i < X; i++) { // fisrt get tmp next value and set int di_v = i * 2; // mesh index of vertex float tmpnew = values[i]; o_max_row[di_v].ind = base_offset + di_v; o_max_row[di_v].val = tmpnew; o_min_row[di_v].ind = base_offset + di_v; o_min_row[di_v].val = tmpnew; int di_e = i * 2 - 1; // mesh index of edge if (tmpold > tmpnew) { o_max_row[di_e].val = tmpold; o_max_row[di_e].ind = base_offset + di_v - 2; /// check this o_min_row[di_e].val = tmpnew; o_min_row[di_e].ind = base_offset + di_v; /// check this } else { // tmpold < tmpnew || di_v-2 < di_v <- 2nd part is always true o_max_row[di_e].val = tmpnew; o_max_row[di_e].ind = base_offset + di_v; /// check this o_min_row[di_e].val = tmpold; o_min_row[di_e].ind = base_offset + di_v - 2; /// check this } // tmpold = tmpnew; } } void do_parallel_lines_max(int X, const ValInd* row0, const ValInd* row1, ValInd* row_o) { int dX = 2 * X - 1; // now do rest! for (int i = 0; i < dX; i++) { // fisrt get tmp next value and set float v0 = row0[i].val; float v1 = row1[i].val; if (v0 > v1) { row_o[i].val = v0; row_o[i].ind = row0[i].ind; continue; } else if (v1 > v0) { row_o[i].val = v1; row_o[i].ind = row1[i].ind; continue; } // need index compare INDEX_TYPE i0 = row0[i].ind; INDEX_TYPE i1 = row1[i].ind; if (i0 > i1) { row_o[i].val = v0; row_o[i].ind = i0; } else { row_o[i].val = v1; row_o[i].ind = i1; } } } void do_parallel_lines_min(int X, const ValInd* row0, const ValInd* row1, ValInd* row_o) { int dX = 2 * X - 1; // now do rest! 
for (int i = 0; i < dX; i++) { // fisrt get tmp next value and set float v0 = row0[i].val; float v1 = row1[i].val; if (v0 < v1) { row_o[i].val = v0; row_o[i].ind = row0[i].ind; continue; } else if (v1 < v0) { row_o[i].val = v1; row_o[i].ind = row1[i].ind; continue; } // need index compare INDEX_TYPE i0 = row0[i].ind; INDEX_TYPE i1 = row1[i].ind; if (i0 < i1) { row_o[i].val = v0; row_o[i].ind = i0; } else { row_o[i].val = v1; row_o[i].ind = i1; } } } void write_max_stick(int dX, const INDEX_TYPE base_offset, ValInd* stick) { for (INDEX_TYPE i = 0; i < dX; i++) { INDEX_TYPE base_id = i + base_offset; byte_tmp_max_ids[base_id] = mMesh->CompressVertexOffsetToByte(base_id, stick[i].ind); } } void write_min_stick(int dX, const INDEX_TYPE base_offset, ValInd* stick) { for (INDEX_TYPE i = 0; i < dX; i++) { INDEX_TYPE base_id = i + base_offset; byte_tmp_min_ids[base_id] = mMesh->CompressVertexOffsetToByte(base_id, stick[i].ind); } } void ComputeOutput() { ThreadedTimer timer(1); timer.StartGlobal(); printf(" -- Creating maxV_labeling ..."); fflush(stdout); byte_tmp_max_ids = new BYTE_TYPE[mMesh->numCells()]; byte_tmp_min_ids = new BYTE_TYPE[mMesh->numCells()]; const Vec3l xyz = mGrid->XYZ(); const Vec3l dXYZ = mMesh->XYZ(); const INDEX_TYPE NUM_X = dXYZ[0]; const INDEX_TYPE NUM_Y = dXYZ[1]; const INDEX_TYPE SLAB_SIZE = NUM_X * NUM_Y; // INDEX_TYPE num_cells = mMesh->numCells(); #pragma omp parallel { int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); // local storage ValInd* slab = new ValInd[SLAB_SIZE * 3]; ValInd* slab_min = new ValInd[SLAB_SIZE * 3]; //float* slab_vals = new FLOATTYPE[SLAB_SIZE * 3]; //INDEX_TYPE* slab_inds = new INDEX_TYPE[SLAB_SIZE * 3]; std::vector<INDEX_TYPE> partition; ArrayIndexPartitioner::EvenChunkSplit(xyz[2], num_threads, partition); //for (auto v : partition) printf("p %llu\n", v); const INDEX_TYPE START_K = partition[thread_num]; INDEX_TYPE localend = (thread_num == num_threads - 1 ? 
partition[thread_num + 1] - 1 : partition[thread_num + 1]); const INDEX_TYPE END_K = localend; ValInd* old_slab = slab; ValInd* mid_slab = &(slab[SLAB_SIZE * 1]); ValInd* new_slab = &(slab[SLAB_SIZE * 2]); ValInd* old_slab_min = slab_min; ValInd* mid_slab_min = &(slab_min[SLAB_SIZE * 1]); ValInd* new_slab_min = &(slab_min[SLAB_SIZE * 2]); for (INDEX_TYPE k = START_K; k <= END_K; k++) { //float* old_slab_vals = slab_vals; //float* mid_slab_vals; &(slab_vals[SLAB_SIZE * 1); //float* new_slab_vals = &(slab_vals[SLAB_SIZE * 2]); //INDEX_TYPE* old_slab_inds = slab_inds; //INDEX_TYPE* mid_slab_inds; &(slab_inds[SLAB_SIZE * 1); //INDEX_TYPE* new_slab_inds = &(slab_inds[SLAB_SIZE * 2]); for (INDEX_TYPE j = 0; j < xyz[1]; j++) { INDEX_TYPE stick_start_j0k0 = j * 2 * NUM_X; // get the row in the slab // J*2, K*2 // Do each vertex-edge-vertex-....-edge-vertex stick in the slab INDEX_TYPE base_cell_id = mMesh->coords2Cellid(Vec3l(0, j * 2, k * 2)); INDEX_TYPE id_data = mGrid->Index3d(Vec3l(0, j, k)); do_fastest_line(xyz[0], base_cell_id, &(mGridFunc->GetImage()[id_data]), &(new_slab[stick_start_j0k0]), &(new_slab_min[stick_start_j0k0])); write_max_stick(NUM_X, base_cell_id, &(new_slab[stick_start_j0k0])); write_min_stick(NUM_X, base_cell_id, &(new_slab_min[stick_start_j0k0])); // J*2-1, K*2 = mid of J*2-2, K*2 // Do the edge-face-edge-...-face-edge stick in the slab INDEX_TYPE stick_start_j2k0 = (j * 2 - 2) * NUM_X; // get the row in the slab INDEX_TYPE stick_start_j1k0 = (j * 2 - 1) * NUM_X; // get the row in the slab INDEX_TYPE base_cell_j1_id = mMesh->coords2Cellid(Vec3l(0, j * 2 - 1, k * 2)); if (j > 0) { // EDGE BETWEEN v(x, y:y-1, z) do_parallel_lines_max(xyz[0], &(new_slab[stick_start_j0k0]), &(new_slab[stick_start_j2k0]), &(new_slab[stick_start_j1k0])); write_max_stick(NUM_X, base_cell_j1_id, &(new_slab[stick_start_j1k0])); do_parallel_lines_min(xyz[0], &(new_slab_min[stick_start_j0k0]), &(new_slab_min[stick_start_j2k0]), &(new_slab_min[stick_start_j1k0])); 
write_min_stick(NUM_X, base_cell_j1_id, &(new_slab_min[stick_start_j1k0])); } // NOW FILL IN BETWEEN SLABS // J*2, K*2-1 = mid of J*2, K*2-2 // Do the edge-face-edge-...-face-edge stick in between slabs if (k > START_K) { INDEX_TYPE base_cell_k1_id = mMesh->coords2Cellid(Vec3l(0, j * 2, k * 2 - 1)); do_parallel_lines_max(xyz[0], &(new_slab[stick_start_j0k0]), &(old_slab[stick_start_j0k0]), &(mid_slab[stick_start_j0k0])); write_max_stick(NUM_X, base_cell_k1_id, &(mid_slab[stick_start_j0k0])); do_parallel_lines_min(xyz[0], &(new_slab_min[stick_start_j0k0]), &(old_slab_min[stick_start_j0k0]), &(mid_slab_min[stick_start_j0k0])); write_min_stick(NUM_X, base_cell_k1_id, &(mid_slab_min[stick_start_j0k0])); } // J*2-1, K*2-1 = mid of J*2-1, K*2-2 // Do the face-quad-face-....-quad-face stick between the slabs if (k > START_K && j > 0) { // face BETWEEN v(x, y:y-1, z:z-1) INDEX_TYPE base_mid_id = mMesh->coords2Cellid(Vec3l(0, j * 2 - 1, k * 2 - 1)); do_parallel_lines_max(xyz[0], &(new_slab[stick_start_j1k0]), &(old_slab[stick_start_j1k0]), &(mid_slab[stick_start_j1k0])); write_max_stick(NUM_X, base_mid_id, &(mid_slab[stick_start_j1k0])); do_parallel_lines_min(xyz[0], &(new_slab_min[stick_start_j1k0]), &(old_slab_min[stick_start_j1k0]), &(mid_slab_min[stick_start_j1k0])); write_min_stick(NUM_X, base_mid_id, &(mid_slab_min[stick_start_j1k0])); } } // swap new and old slabs ValInd* tmp = old_slab; old_slab = new_slab; new_slab = tmp; tmp = old_slab_min; old_slab_min = new_slab_min; new_slab_min = tmp; //do_fastest_line(xyz[0], id_cell, // &(mGridFunc->GetImage()[id_data]), // &(tmp_vals[id_cell]), // &(tmp_ids[id_cell])); //if (j > 0) { // // EDGE BETWEEN v(x, y:y-1, z) // INDEX_TYPE id_cell_last = mMesh->coords2Cellid(Vec3l(0, j * 2 - 2, k * 2)); // INDEX_TYPE id_cell_mid = mMesh->coords2Cellid(Vec3l(0, j * 2 - 1, k * 2)); // do_parallel_lines(xyz[0], // &(tmp_vals[id_cell]), // &(tmp_ids[id_cell]), // &(tmp_vals[id_cell_last]), // &(tmp_ids[id_cell_last]), // 
&(tmp_vals[id_cell_mid]), // &(tmp_ids[id_cell_mid])); //} //if (k > START_K) { // // EDGE BETWEEN v(x, y, z:z-1) // INDEX_TYPE id_cell_last = mMesh->coords2Cellid(Vec3l(0, j * 2, k * 2 - 2)); // INDEX_TYPE id_cell_mid = mMesh->coords2Cellid(Vec3l(0, j * 2, k * 2 - 1)); // do_parallel_lines(xyz[0], // &(tmp_vals[id_cell]), // &(tmp_ids[id_cell]), // &(tmp_vals[id_cell_last]), // &(tmp_ids[id_cell_last]), // &(tmp_vals[id_cell_mid]), // &(tmp_ids[id_cell_mid])); //} //if (k > START_K && j > 0) { // // face BETWEEN v(x, y:y-1, z:z-1) // INDEX_TYPE id_cell_e0 = mMesh->coords2Cellid(Vec3l(0, j * 2 - 2, k * 2 - 1)); // INDEX_TYPE id_cell_e1 = mMesh->coords2Cellid(Vec3l(0, j * 2, k * 2 - 1)); // INDEX_TYPE id_cell_mid = mMesh->coords2Cellid(Vec3l(0, j * 2 - 1, k * 2 - 1)); // do_parallel_lines(xyz[0], // &(tmp_vals[id_cell_e0]), // &(tmp_ids[id_cell_e0]), // &(tmp_vals[id_cell_e1]), // &(tmp_ids[id_cell_e1]), // &(tmp_vals[id_cell_mid]), // &(tmp_ids[id_cell_mid])); //} } //INDEX_TYPE num_cells = mMesh->numCells(); //for (INDEX_TYPE i = 0; i < num_cells; i++) { // byte_tmp_max_ids[i] = mMesh->CompressVertexOffsetToByte(i, tmp_ids[i]); // THIS COULD BE IMPROVED WITH BETTER HASH //} // INDEX_TYPE num_cells = mMesh->numCells(); // mMaxVLabel = new DenseLabeling<DIM_TYPE>(num_cells); // //mMaxVLabel->SetAll(0); // not needed since this is initialized to first vertex anyway // //return; //#pragma omp parallel // { // int num_threads = omp_get_num_threads(); // int thread_num = omp_get_thread_num(); // // std::vector<INDEX_TYPE> partition; // ArrayIndexPartitioner::EvenChunkSplit(num_cells, num_threads, partition); // // for (INDEX_TYPE cellid = partition[thread_num]; cellid < partition[thread_num + 1]; cellid++) { // mMaxVLabel->SetLabel(cellid, idxOf_maxVertex(cellid)); // } // } // // //MeshType::AllCellsIterator ait(mMesh); // //for (ait.begin(); ait.valid(); ait.advance()){ // // mMaxVLabel->SetLabel(ait.value(), idxOf_maxVertex(ait.value())); // //} delete[] slab; 
delete[] slab_min; } timer.EndGlobal(); printf(" Done! "); timer.PrintAll(); } }; template <class MeshType, class GridFuncType> class RegularGridMaxMinVertexLabeling2D { protected: BYTE_TYPE* byte_tmp_max_ids; BYTE_TYPE* byte_tmp_min_ids; const RegularGrid2D* mGrid; MeshType* mMesh; GridFuncType* mGridFunc; public: RegularGridMaxMinVertexLabeling2D(MeshType* mesh, GridFuncType* gridFunc) : mMesh(mesh), mGridFunc(gridFunc) { byte_tmp_max_ids = NULL; byte_tmp_min_ids = NULL; mGrid = mGridFunc->GetGrid(); } ~RegularGridMaxMinVertexLabeling2D() { if (byte_tmp_max_ids != NULL) delete byte_tmp_max_ids; if (byte_tmp_min_ids != NULL) delete byte_tmp_min_ids; } void HACK_init() { byte_tmp_max_ids = new BYTE_TYPE[mMesh->numCells()]; byte_tmp_min_ids = new BYTE_TYPE[mMesh->numCells()]; } BYTE_TYPE GetUncompressedMaxVal(INDEX_TYPE cellid) const { return byte_tmp_max_ids[cellid]; } BYTE_TYPE GetUncompressedMinVal(INDEX_TYPE cellid) const { return byte_tmp_min_ids[cellid]; } void SetUncompressedMaxVal(INDEX_TYPE cellid, BYTE_TYPE val) { byte_tmp_max_ids[cellid] = val; } void SetUncompressedMinVal(INDEX_TYPE cellid, BYTE_TYPE val) { byte_tmp_min_ids[cellid] = val; } INDEX_TYPE Cell2HighestVertex(INDEX_TYPE cellid) { return mMesh->UncompressByteToVertexOffset(cellid, byte_tmp_max_ids[cellid]); } INDEX_TYPE Cell2LowestVertex(INDEX_TYPE cellid) { return mMesh->UncompressByteToVertexOffset(cellid, byte_tmp_min_ids[cellid]); } bool Before(INDEX_TYPE a, INDEX_TYPE b) const { return mGridFunc->IsGreater(b, a); } struct ValInd { float val; INDEX_TYPE ind; ValInd() {} ValInd(float v, INDEX_TYPE i) : val(v), ind(i) {} }; void do_fastest_line(int X, const INDEX_TYPE base_offset, float* values, ValInd* o_max_row, ValInd* o_min_row) { // X = dim of line // float* values //out = float*, int* int dX = 2 * X - 1; // do 0th index //ValInd old(values[0], base_offset); float tmpold = values[0]; o_max_row[0].ind = 0 + base_offset; o_max_row[0].val = tmpold; o_min_row[0].ind = 0 + base_offset; 
o_min_row[0].val = tmpold; // now do rest! for (int i = 1; i < X; i++) { // fisrt get tmp next value and set int di_v = i * 2; // mesh index of vertex float tmpnew = values[i]; o_max_row[di_v].ind = base_offset + di_v; o_max_row[di_v].val = tmpnew; o_min_row[di_v].ind = base_offset + di_v; o_min_row[di_v].val = tmpnew; int di_e = i * 2 - 1; // mesh index of edge if (tmpold > tmpnew) { o_max_row[di_e].val = tmpold; o_max_row[di_e].ind = base_offset + di_v - 2; /// check this o_min_row[di_e].val = tmpnew; o_min_row[di_e].ind = base_offset + di_v; /// check this } else { // tmpold < tmpnew || di_v-2 < di_v <- 2nd part is always true o_max_row[di_e].val = tmpnew; o_max_row[di_e].ind = base_offset + di_v; /// check this o_min_row[di_e].val = tmpold; o_min_row[di_e].ind = base_offset + di_v - 2; /// check this } // tmpold = tmpnew; } } void do_parallel_lines_max(int X, const ValInd* row0, const ValInd* row1, ValInd* row_o) { int dX = 2 * X - 1; // now do rest! for (int i = 0; i < dX; i++) { // fisrt get tmp next value and set float v0 = row0[i].val; float v1 = row1[i].val; if (v0 > v1) { row_o[i].val = v0; row_o[i].ind = row0[i].ind; continue; } else if (v1 > v0) { row_o[i].val = v1; row_o[i].ind = row1[i].ind; continue; } // need index compare INDEX_TYPE i0 = row0[i].ind; INDEX_TYPE i1 = row1[i].ind; if (i0 > i1) { row_o[i].val = v0; row_o[i].ind = i0; } else { row_o[i].val = v1; row_o[i].ind = i1; } } } void do_parallel_lines_min(int X, const ValInd* row0, const ValInd* row1, ValInd* row_o) { int dX = 2 * X - 1; // now do rest! 
for (int i = 0; i < dX; i++) { // fisrt get tmp next value and set float v0 = row0[i].val; float v1 = row1[i].val; if (v0 < v1) { row_o[i].val = v0; row_o[i].ind = row0[i].ind; continue; } else if (v1 < v0) { row_o[i].val = v1; row_o[i].ind = row1[i].ind; continue; } // need index compare INDEX_TYPE i0 = row0[i].ind; INDEX_TYPE i1 = row1[i].ind; if (i0 < i1) { row_o[i].val = v0; row_o[i].ind = i0; } else { row_o[i].val = v1; row_o[i].ind = i1; } } } void write_max_stick(int dX, const INDEX_TYPE base_offset, ValInd* stick) { for (INDEX_TYPE i = 0; i < dX; i++) { INDEX_TYPE base_id = i + base_offset; byte_tmp_max_ids[base_id] = mMesh->CompressVertexOffsetToByte(base_id, stick[i].ind); } } void write_min_stick(int dX, const INDEX_TYPE base_offset, ValInd* stick) { for (INDEX_TYPE i = 0; i < dX; i++) { INDEX_TYPE base_id = i + base_offset; byte_tmp_min_ids[base_id] = mMesh->CompressVertexOffsetToByte(base_id, stick[i].ind); } } void ComputeOutput() { ThreadedTimer timer(1); timer.StartGlobal(); printf(" -- Creating maxV_labeling ..."); fflush(stdout); byte_tmp_max_ids = new BYTE_TYPE[mMesh->numCells()]; byte_tmp_min_ids = new BYTE_TYPE[mMesh->numCells()]; const Vec2l xy = mGrid->XY(); const Vec2l dXY = mMesh->XY(); const INDEX_TYPE NUM_X = dXY[0]; const INDEX_TYPE NUM_Y = dXY[1]; const INDEX_TYPE SLAB_SIZE = NUM_X ; // INDEX_TYPE num_cells = mMesh->numCells(); #pragma omp parallel { int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); // local storage ValInd* slab = new ValInd[SLAB_SIZE * 3]; ValInd* slab_min = new ValInd[SLAB_SIZE * 3]; //float* slab_vals = new FLOATTYPE[SLAB_SIZE * 3]; //INDEX_TYPE* slab_inds = new INDEX_TYPE[SLAB_SIZE * 3]; std::vector<INDEX_TYPE> partition; ArrayIndexPartitioner::EvenChunkSplit(xy[1], num_threads, partition); //for (auto v : partition) printf("p %llu\n", v); const INDEX_TYPE START_J = partition[thread_num]; INDEX_TYPE localend = (thread_num == num_threads - 1 ? 
partition[thread_num + 1] - 1 : partition[thread_num + 1]); const INDEX_TYPE END_J = localend; ValInd* old_slab = slab; ValInd* mid_slab = &(slab[SLAB_SIZE * 1]); ValInd* new_slab = &(slab[SLAB_SIZE * 2]); ValInd* old_slab_min = slab_min; ValInd* mid_slab_min = &(slab_min[SLAB_SIZE * 1]); ValInd* new_slab_min = &(slab_min[SLAB_SIZE * 2]); for (INDEX_TYPE j = START_J; j <= END_J; j++) { // J*2 // Do each vertex-edge-vertex-....-edge-vertex stick in the slab INDEX_TYPE base_cell_id = mMesh->coords2Cellid(Vec2l(0, j * 2)); INDEX_TYPE id_data = mGrid->Index2d(Vec2l(0, j)); do_fastest_line(xy[0], base_cell_id, &(mGridFunc->GetImage()[id_data]), new_slab, new_slab_min); write_max_stick(NUM_X, base_cell_id, new_slab); write_min_stick(NUM_X, base_cell_id, new_slab_min); if (j > START_J) { // face BETWEEN v(x, y:y-1, z:z-1) INDEX_TYPE base_mid_id = mMesh->coords2Cellid(Vec2l(0, j * 2 - 1)); do_parallel_lines_max(xy[0], new_slab, old_slab, mid_slab); write_max_stick(NUM_X, base_mid_id, mid_slab); } // swap new and old slabs ValInd* tmp = old_slab; old_slab = new_slab; new_slab = tmp; tmp = old_slab_min; old_slab_min = new_slab_min; new_slab_min = tmp; } delete[] slab; delete[] slab_min; } timer.EndGlobal(); printf(" Done! "); timer.PrintAll(); } }; } #endif
copyprivate2.c
/* single and copyprivate: exactly one thread of the team executes the
 * single block, and the copyprivate clause broadcasts its private values
 * to the private copies of every other thread before any of them proceed. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(int argc, char* argv[])
{
    /* each thread receives private copies of both variables */
    float bcast_f = 0.0f;
    int bcast_i = 0;

#ifdef _OPENMP
    omp_set_num_threads(4);  /* request a team of four threads */
#endif

#pragma omp parallel private(bcast_f, bcast_i)
    {
#pragma omp single copyprivate(bcast_f, bcast_i)
        {
            /* one thread initializes; copyprivate broadcasts the result */
            bcast_f = 546.0;
            bcast_i = 777;
        }
        /* every thread now sees the broadcast values */
        printf("x=%f, y=%d\n", bcast_f, bcast_i);
    }
    return 0;
}
is.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - IS This benchmark is an OpenMP C version of the NPB IS code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" #include "npbparams.h" #include <stdlib.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* #define USE_BUCKETS */ /* buckets are not used in the OpenMP C version */ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ typedef int INT_TYPE; /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. 
*/ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[SIZE_OF_BUFFERS], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE]; #ifdef USE_BUCKETS INT_TYPE bucket_size[NUM_BUCKETS], bucket_ptrs[NUM_BUCKETS]; #endif /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}; /***********************/ /* function prototypes */ /***********************/ double randlc2( double *X, double *A ); void full_verify( void ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. 
X is updated to contain
 *         the new seed x_1, so that subsequent calls to RANDLC using the same
 *         arguments will generate a continuous sequence.
 *
 *         This routine should produce the same results on any computer with at least
 *         48 mantissa bits in double precision floating point data.  On Cray systems,
 *         double precision should be disabled.
 *
 *         David H. Bailey     October 26, 1990
 *
 *         IMPLICIT DOUBLE PRECISION (A-H, O-Z)
 *         SAVE KS, R23, R46, T23, T46
 *         DATA KS/0/
 *
 *         If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
 *         T23 = 2 ^ 23, and T46 = 2 ^ 46.  These are computed in loops, rather than
 *         by merely using the ** operator, in order to insure that the results are
 *         exact on all systems.  This code assumes that 0.5D0 is represented exactly.
 */

/*****************************************************************/
/*************           R  A  N  D  L  C             ************/
/*************                                        ************/
/*************    portable random number generator    ************/
/*****************************************************************/

/* Linear congruential generator x_{k+1} = a * x_k (mod 2^46).
 * Operands are split into 23-bit halves so every intermediate product
 * is exactly representable in a >=48-bit double mantissa; this makes the
 * sequence bit-reproducible across machines.
 * X: in/out seed (updated to x_{k+1});  A: multiplier.
 * Returns the deviate R46 * x_{k+1}, uniform in (0, 1). */
double randlc2(double *X, double *A)
{
    static int    KS = 0;                 /* one-time-init flag for the constants below */
    static double R23, R46, T23, T46;     /* 2^-23, 2^-46, 2^23, 2^46 */
    double T1, T2, T3, T4;
    double A1;
    double A2;
    double X1;
    double X2;
    double Z;
    int i, j;

    if (KS == 0)
    {
        /* Compute R23 = 2^-23, R46 = 2^-46, T23 = 2^23, T46 = 2^46 by
           repeated multiplication (not pow) so the results are exact on
           all systems; assumes 0.5 is represented exactly. */
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;

        for (i=1; i<=23; i++)
        {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        for (i=1; i<=46; i++)
        {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }

/*  Break A into two parts such that A = 2^23 * A1 + A2 and set X = N.  */
    T1 = R23 * *A;
    j  = T1;                /* truncation to int implements floor() */
    A1 = j;
    A2 = *A - T23 * A1;

/*  Break X into two parts such that X = 2^23 * X1 + X2, compute
    Z = A1 * X2 + A2 * X1  (mod 2^23), and then
    X = 2^23 * Z + A2 * X2  (mod 2^46).                            */
    T1 = R23 * *X;
    j  = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;

    j  = R23 * T1;
    T2 = j;
    Z  = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j  = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;
    return(R46 * *X);
}

/*****************************************************************/
/*************      C  R  E  A  T  E  _  S  E  Q      ************/
/*****************************************************************/

/* Fill the global key_array[0..NUM_KEYS-1] with pseudo-random keys in
 * [0, MAX_KEY): each key is the sum of four consecutive randlc2 deviates
 * (range (0,4)) scaled by MAX_KEY/4.
 * seed: initial PRNG seed;  a: PRNG multiplier.
 * NOTE(review): local j is unused.
 * Assumes key_array/NUM_KEYS/MAX_KEY are declared earlier in this file. */
void create_seq( double seed, double a )
{
    double x;
    int    i, j, k;

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
        x = randlc2(&seed, &a);
        x += randlc2(&seed, &a);
        x += randlc2(&seed, &a);
        x += randlc2(&seed, &a);

        key_array[i] = k*x;     /* truncated to the integer key type */
    }
}

/*****************************************************************/
/*************    F  U  L  L  _  V  E  R  I  F  Y     ************/
/*****************************************************************/

/* Untimed final check: scatter the keys into sorted order using the rank
 * table left in key_buff_ptr_global by the last rank() iteration, then
 * count adjacent inversions.  Increments the global passed_verification
 * on success.
 * NOTE(review): locals m and unique_keys are unused; j is INT_TYPE but
 * printed with %d — mismatched if INT_TYPE is wider than int; confirm. */
void full_verify()
{
    INT_TYPE    i, j;
    INT_TYPE    k;
    INT_TYPE    m, unique_keys;

/*  Now, finally, sort the keys:  */
    /* --key_buff_ptr_global[key] yields the destination slot for each key
       (counting-sort scatter phase). */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

/*  Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n", j );
    }
    else
        passed_verification++;
}

/*****************************************************************/
/*************             R  A  N  K             ****************/
/*****************************************************************/

/* One timed ranking iteration, executed by every thread of an enclosing
 * parallel region (contains omp master/barrier/for/critical).
 * Counting-sort histogram: each thread accumulates a private histogram
 * prv_buff1, prefix-sums it, then merges into the shared key_buff1 under
 * a critical section.  The master thread also perturbs two keys per
 * iteration and runs the partial verification test.
 * NOTE(review): prv_buff1[MAX_KEY] lives on each thread's stack — large
 * MAX_KEY classes may need a raised stack limit; confirm.
 * NOTE(review): the guard allows k == 0 yet key_buff1[k-1] is then read
 * (index -1) — upstream NPB has the same guard; verify k is never 0.
 * NOTE(review): locals l, m, shift, key, min_key_val, max_key_val unused. */
void rank( int iteration )
{

    INT_TYPE    i, j, k;
    INT_TYPE    l, m;

    INT_TYPE    shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
    INT_TYPE    key;
    INT_TYPE    min_key_val, max_key_val;

    INT_TYPE    prv_buff1[MAX_KEY];   /* per-thread private histogram */

#pragma omp master
  {
    /* Plant two iteration-dependent keys so each iteration ranks a
       slightly different sequence. */
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

/*  Determine where the partial verify test keys are, load into  */
/*  top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

/*  Clear the work array */
    for( i=0; i<MAX_KEY; i++ )
        key_buff1[i] = 0;
  }
#pragma omp barrier
    /* barrier: all threads must see the cleared key_buff1 before merging */

    for (i=0; i<MAX_KEY; i++)
        prv_buff1[i] = 0;

/*  Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ ) {
        key_buff2[i] = key_array[i];

/*  Ranking of all keys occurs in this section:                 */
/*  In this section, the keys themselves are used as their
    own indexes to determine how many of each there are: their
    individual population                                       */
        prv_buff1[key_buff2[i]]++;  /* Now they have individual key   */
    }                               /* population                     */

    /* Private prefix sum: prv_buff1[i] becomes the count of keys <= i. */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

#pragma omp critical
    {
        /* Merge this thread's cumulative counts into the shared table. */
        for( i=0; i<MAX_KEY; i++ )
            key_buff1[i] += prv_buff1[i];
    }

/*  To obtain ranks of each key, successively add the individual key
    population, not forgetting to add m, the total of lesser keys,
    to the first key population                                       */

#pragma omp barrier
#pragma omp master
  {
/* This is the partial verify test section */
/* Observe that test_rank_array vals are   */
/* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];        /* test vals were put here */
        if( 0 <= k  &&  k <= NUM_KEYS-1 )
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )         /* note: strict <, unlike the other classes */
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }

/*  Make copies of rank info for use by full_verify: these variables
    in rank are local; making them global slows down the code, probably
    since they cannot be made register by compiler                        */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;

  } /* end master */
}

/*****************************************************************/
/*************             M  A  I  N             ****************/
/*****************************************************************/

/* Driver: selects verification data for CLASS, generates the key
 * sequence, runs one untimed warm-up ranking, then MAX_ITERATIONS timed
 * rankings inside a single parallel region, verifies, and prints results.
 * NOTE(review): locals itemp and maxtime are unused. */
int main(int argc, char** argv )
{

    int             i, iteration, itemp;
    int             nthreads = 1;

    double          timecounter, maxtime;

/*  Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i]  = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i]  = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i]  = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i]  = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i]  = C_test_rank_array[i];
                break;
        };

/*  Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
            " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );

/*  Initialize timer  */
    timer_clear( 0 );

/*  Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00,    /* Random number gen seed */
                1220703125.00 ); /* Random number gen mult */

/*  Do one interation for free (i.e., untimed) to guarantee
    initialization of all data and code pages and respective tables */
#pragma omp parallel
    rank( 1 );

/*  Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

/*  Start timer  */
    timer_start( 0 );

/*  This is the main iteration: iteration is private so every thread
    walks the full loop and cooperates inside rank()'s worksharing. */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
#pragma omp master
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
//#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
//#endif /* _OPENMP */
    }

/*  End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

/*  This tests that keys are in sequence: sorting of last ranked key seq
    occurs here, but is an untimed operation                             */
    full_verify();

/*  The final printout: 5 partial checks per iteration plus 1 full check */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     TOTAL_KEYS,
                     0,
                     0,
                     MAX_ITERATIONS,
                     nthreads,
                     timecounter,
                     ((double) (MAX_ITERATIONS*TOTAL_KEYS))
                                                  /timecounter/1000000.,
                     "keys ranked",
                     passed_verification,
                     NPBVERSION,
                     COMPILETIME,
                     CC,
                     CLINK,
                     C_LIB,
                     C_INC,
                     CFLAGS,
                     CLINKFLAGS,
                     "randlc");

    return 0;
         /**************************/
}        /*  E N D  P R O G R A M  */
         /**************************/
reduction-7.c
/* GCC testsuite: OpenMP array-section reductions (+, *, |, &, max) over
   pointer-to-array, pointer, VLA-style, and global-array list items.
   All expected values below are the exact results of the 128-iteration
   loop; the test aborts on any mismatch. */

char z[10] = { 0 };   /* global so it can be named as a reduction array section */

/* noinline/noclone keep the reduction clauses from being optimized away
   by interprocedural propagation of the constant arguments. */
__attribute__((noinline, noclone)) void
foo (int (*x)[3][2], int *y, long w[1][2])
{
  unsigned long long a[9] = {};
  short b[5] = {};
  int i;
  /* Mixed section syntax on purpose: [0:2] (start:length), [:] (whole
     dimension), [:4] (first 4), and bare 'b' (whole array). */
  #pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \
                           reduction(*:y[:3]) reduction(|:a[:4]) \
                           reduction(&:w[0:1][:2]) reduction(max:b)
  for (i = 0; i < 128; i++)
    {
      x[i / 64][i % 3][(i / 4) & 1] += i;
      if ((i & 15) == 1)
        y[0] *= 3;
      if ((i & 31) == 2)
        y[1] *= 7;
      if ((i & 63) == 3)
        y[2] *= 17;
      z[i / 32] += (i & 3);
      if (i < 4)
        z[i] += i;
      /* (i & 30) only produces even shifts, hence the 0x55555555 pattern. */
      a[i / 32] |= 1ULL << (i & 30);
      w[0][i & 1] &= ~(1L << (i / 17 * 3));
      if ((i % 79) > b[0])
        b[0] = i % 79;
      if ((i % 13) > b[1])
        b[1] = i % 13;
      if ((i % 23) > b[2])
        b[2] = i % 23;
      if ((i % 85) > b[3])
        b[3] = i % 85;
      if ((i % 192) > b[4])
        b[4] = i % 192;
    }
  /* a[4..8] lie outside the a[:4] reduction section and must stay 0. */
  for (i = 0; i < 9; i++)
    if (a[i] != (i < 4 ? 0x55555555ULL : 0))
      __builtin_abort ();
  if (b[0] != 78 || b[1] != 12 || b[2] != 22 || b[3] != 84 || b[4] != 127)
    __builtin_abort ();
}

int
main ()
{
  int a[4][3][2] = {};
  /* Expected sums; rows 0 and 3 are untouched because foo receives &a[1]
     and its x[0:2] section covers only a[1] and a[2]. */
  static int a2[4][3][2] = {{{ 0, 0 }, { 0, 0 }, { 0, 0 }},
                            {{ 312, 381 }, { 295, 356 }, { 337, 335 }},
                            {{ 1041, 975 }, { 1016, 1085 }, { 935, 1060 }},
                            {{ 0, 0 }, { 0, 0 }, { 0, 0 }}};
  int y[5] = { 0, 1, 1, 1, 0 };
  int y2[5] = { 0, 6561, 2401, 289, 0 };   /* 3^8, 7^4, 17^2 */
  char z2[10] = { 48, 49, 50, 51, 0, 0, 0, 0, 0, 0 };
  long w[1][2] = { ~0L, ~0L };
  foo (&a[1], y + 1, w);
  if (__builtin_memcmp (a, a2, sizeof (a))
      || __builtin_memcmp (y, y2, sizeof (y))
      || __builtin_memcmp (z, z2, sizeof (z))
      || w[0][0] != ~0x249249L || w[0][1] != ~0x249249L)
    __builtin_abort ();
  return 0;
}
distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized

// Clang -verify Sema test for '#pragma omp distribute simd' diagnostics.
// The expected-error/expected-note/expected-warning comments below are
// checked by -verify and use relative @+N line offsets; do not insert
// lines between a directive comment and its anchor.  The few
// 'xxpected-error' spellings are intentionally disabled checks.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd foo

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd safelen(4)

// Directive with no clauses, and the "must be a for loop" diagnostic.
void test_no_clause() {
  int i;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}}
#pragma omp distribute simd
  ++i;
}

// Branches (goto/return) may not escape the loop region.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp target
#pragma omp teams
#pragma omp distribute simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Unknown trailing tokens are warned about and ignored.
void test_invalid_clause() {
  int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

// Punctuation where a clause is expected.
void test_non_identifiers() {
  int i, x;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// safelen(): parse errors, malformed arguments, and value constraints.
void test_safelen() {
  int i;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd safelen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd safelen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// xxpected-error@+1 {{expected expression}}
#pragma omp distribute simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// simdlen(): same parse/value matrix as safelen().
void test_simdlen() {
  int i;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd simdlen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// simdlen must not exceed safelen, in either clause order.
void test_safelen_simdlen() {
  int i;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

// collapse(): parse/value matrix plus loop-nest-depth and nesting checks.
void test_collapse() {
  int i;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+3 {{defined as reduction}}
#pragma omp target
#pragma omp teams
#pragma omp distribute simd collapse(2) reduction(+ : i)
  for (i = 0; i < 16; ++i)
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
#pragma omp target
#pragma omp teams
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
#pragma omp distribute simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

// linear(): parse errors and undeclared list items.
void test_linear() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd linear(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd linear(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd linear()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// aligned(): parse errors, alignment expressions, type and duplicate checks.
void test_aligned() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd aligned(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd aligned()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// private(): parse errors and valid uses.
void test_private() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// firstprivate(): single parse-error case.
void test_firstprivate() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
}

// lastprivate(): parse errors and valid uses.
void test_lastprivate() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// reduction(): operator parsing, all valid operators, invalid list items.
void test_reduction() {
  int i, x, y;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp distribute simd reduction(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp distribute simd reduction()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp distribute simd reduction(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected identifier}}
#pragma omp distribute simd reduction( : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp distribute simd reduction(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp distribute simd reduction(+
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
//
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd reduction(+:
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd reduction(+ :)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd reduction(+ :, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd reduction(+ : x, + : y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected identifier}}
#pragma omp distribute simd reduction(% : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(+ : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(* : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(- : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(& : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(| : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(^ : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(&& : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(|| : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(max : x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(min : x)
  for (i = 0; i < 16; ++i)
    ;

  struct X {
    int x;
  };
  struct X X;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd reduction(+ : X.x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute simd reduction(+ : x + x)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop iteration variable must be of integer or pointer type.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}

// linear() modifiers: only 'val' is allowed on distribute simd.
void linear_modifiers(int argc) {
  int k;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd linear(k)
  for (k = 0; k < argc; ++k)
    ++k;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd linear(val(k))
  for (k = 0; k < argc; ++k)
    ++k;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}}
  for (k = 0; k < argc; ++k)
    ++k;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}}
  for (k = 0; k < argc; ++k)
    ++k;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}}
  for (k = 0; k < argc; ++k)
    ++k;
}
kvstore_dist_server.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file mxnet_node.h * \brief implement mxnet nodes */ #ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #include <mxnet/c_api.h> #include <mxnet/kvstore.h> #include <ps/ps.h> #include <queue> #include <string> #include <mutex> #include <condition_variable> #include <memory> #include <functional> #include <future> #include <vector> #include "../profiler/profiler.h" #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/init_op.h" //dist_device_sync #define _STALE 0 using Staleness = uint64_t; using Callback = std::function<void()>; namespace mxnet { namespace kvstore { // maintain same order in frontend. enum class CommandType { kController, kSetMultiPrecision, kStopServer, kSyncMode, kSetGradientCompression, kSetProfilerParams }; enum class RequestType { kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull//, SSPDefaultPushPull }; struct DataHandleType { RequestType requestType; int dtype; }; /*! * Uses Cantor pairing function to generate a unique number given two numbers. * This number can also be inverted to find the unique pair whose Cantor value is this number. 
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function * \param requestType RequestType * \param dtype integer * \return Cantor value of arguments */ static int GetCommandType(RequestType requestType, int d) { int m = static_cast<int>(requestType); return (((m + d) * (m + d + 1)) / 2) + d; } /*! * Unpairs Cantor value and finds the two integers used to pair. * Then returns DataHandleType object with those numbers. * \param cmd DataHandleCommand generated by GetCommandType function * \return DataHandleType */ static DataHandleType DepairDataHandleType(int cmd) { int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2); int t = ((w * w) + w) / 2; int y = cmd - t; int x = w - y; CHECK_GE(x, 0); CHECK_GE(y, 0); DataHandleType type; type.requestType = static_cast<RequestType>(x); type.dtype = y; return type; } /** * \brief executor runs a function using the thread called \ref Start */ class Executor { public: /** * \brief start the executor */ void Start() { std::unique_lock<std::mutex> lk(mu_); while (true) { cond_.wait(lk, [this]{return !queue_.empty();}); Block blk = std::move(queue_.front()); queue_.pop(); lk.unlock(); if (blk.f) { blk.f(); blk.p->set_value(); } else { blk.p->set_value(); break; } lk.lock(); } } /** * \brief function */ typedef std::function<void()> Func; /** * \brief let the thread called \ref Start to exec a function. 
threadsafe */ void Exec(const Func& func) { Block blk(func); auto fut = blk.p->get_future(); { std::lock_guard<std::mutex> lk(mu_); queue_.push(std::move(blk)); cond_.notify_one(); } fut.wait(); } /** * \brief stop the thread, threadsafe */ void Stop() { Exec(Func()); } private: struct Block { explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { } Func f; std::shared_ptr<std::promise<void>> p; }; std::queue<Block> queue_; std::mutex mu_; std::condition_variable cond_; }; class KVStoreDistServer { public: KVStoreDistServer() { using namespace std::placeholders; ps_server_ = new ps::KVServer<char>(0); static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle( std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2)); ps_server_->set_request_handle( std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3)); sync_mode_ = false; stale = _STALE; //inital is gradient_compression_ = std::make_shared<GradientCompression>(); log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false); } ~KVStoreDistServer() { profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0)); delete ps_server_; } void set_controller(const KVStore::Controller& controller) { //Usually None CHECK(controller); controller_ = controller; } void set_updater(const KVStore::Updater& updater) { //Usually fixed CHECK(updater); updater_ = updater; } /** * \brief blocked until received the command \a kSyncMode */ void Run() { exec_.Start(); } private: struct UpdateBuf { std::vector<ps::KVMeta> request; NDArray merged; // temp_array is used to cast received values as float32 for computation if required NDArray temp_array; }; void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) { CommandType recved_type = static_cast<CommandType>(recved.head); switch (recved_type) { case CommandType::kStopServer: exec_.Stop(); break; case CommandType::kSyncMode: sync_mode_ = true; //Xin YAO std::string tmp_s = 
static_cast<std::string>(recved.body); _STALE = 0; for(int i=9; i<tmp_s.length(); i++){ //reading the staleness _STALE *= 10; _STALE += (tmp_s[i] - '0'); } break; case CommandType::kSetGradientCompression: gradient_compression_->DecodeParams(recved.body); break; case CommandType::kSetProfilerParams: // last char is the type of profiler command ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand> (recved.body.back() - '0'), recved.body); break; case CommandType::kSetMultiPrecision: // uses value 1 for message id from frontend if (!multi_precision_) { multi_precision_ = true; CreateMultiPrecisionCopies(); } break; case CommandType::kController: // this uses value 0 for message id from frontend // let the main thread to execute ctrl, which is necessary for python exec_.Exec([this, recved]() { CHECK(controller_); controller_(recved.head, recved.body); }); break; } app->Response(recved); } /* * For keys already initialized, if necessary create stored_realt. * This will only be used if by some wrong usage of kvstore, * some keys are initialized before optimizer is set. 
*/ void CreateMultiPrecisionCopies() { for (auto const &stored_entry : store_) { const int key = stored_entry.first; const NDArray &stored = stored_entry.second; if (stored.dtype() != mshadow::kFloat32) { auto &stored_realt = store_realt_[key]; if (stored.storage_type() == kRowSparseStorage) { stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(), true, mshadow::kFloat32); } else { stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32); } auto &update = update_buf_[key]; if (!update.merged.is_none()) { if (update.merged.storage_type() == kRowSparseStorage) { update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(), true, mshadow::kFloat32); } else { update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false, mshadow::kFloat32); } } CHECK(update.request.size() == 0) << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway." << "Please set optimizer before pushing keys." << key << " " << update.request.size(); CopyFromTo(stored, stored_realt); } } for (auto const &stored_realt_entry : store_realt_) { stored_realt_entry.second.WaitToRead(); } } void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) { switch (type) { case KVStoreServerProfilerCommand::kSetConfig: SetProfilerConfig(body.substr(0, body.size() - 1)); break; case KVStoreServerProfilerCommand::kState: MXSetProfilerState(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kPause: MXProfilePause(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kDump: MXDumpProfile(static_cast<int>(body.front() - '0')); break; } } void SetProfilerConfig(std::string params_str) { std::vector<std::string> elems; mxnet::kvstore::split(params_str, ',', std::back_inserter(elems)); std::vector<const char*> ckeys; std::vector<const char*> cvals; ckeys.reserve(elems.size()); cvals.reserve(elems.size()); for (size_t i=0; i < 
elems.size(); i++) { std::vector<std::string> parts; mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts)); CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker"; CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty"; CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0]; if (parts[0] == "filename") { parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1]; } char* ckey = new char[parts[0].length() + 1]; std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str()); ckeys.push_back(ckey); char* cval = new char[parts[1].length() + 1]; std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str()); cvals.push_back(cval); } MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]); for (size_t i=0; i < ckeys.size(); i++) { delete[] ckeys[i]; delete[] cvals[i]; } } void DataHandleEx(const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { DataHandleType type = DepairDataHandleType(req_meta.cmd); switch (type.requestType) { case RequestType::kRowSparsePushPull: DataHandleRowSparse(type, req_meta, req_data, server); break; case RequestType::kCompressedPushPull: DataHandleCompressed(type, req_meta, req_data, server); break; case RequestType::kDefaultPushPull: stale == _STALE ? DataHandleDefault(type, req_meta, req_data, server):SSPDataHandleDefault(type, req_meta, req_data, server); break; //Xin Yao //case RequestType::SSPDefaultPushPull: //break; } } inline bool has_multi_precision_copy(const DataHandleType type) { return multi_precision_ && type.dtype != mshadow::kFloat32; } inline void ApplyUpdates(const DataHandleType type, const int key, UpdateBuf *update_buf, ps::KVServer<char>* server) { if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python auto& stored = has_multi_precision_copy(type) ? 
store_realt_[key] : store_[key]; auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array; if (updater_) { exec_.Exec([this, key, &update, &stored](){ CHECK(updater_); updater_(key, update, &stored); }); } else { CHECK(sync_mode_) << "Updater needs to be set for async mode"; // if no updater, just copy CopyFromTo(update_buf->merged, &stored); } if (log_verbose_) { LOG(INFO) << "sent response to " << update_buf->request.size() << " workers"; } for (const auto& req : update_buf->request) { server->Response(req); } update_buf->request.clear(); if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); } else { update_buf->merged.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved, UpdateBuf* updateBuf) { NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array); const NDArray& to_merge = has_multi_precision_copy(type) ? 
updateBuf->temp_array : recved; // accumulate row_sparse gradients using namespace mshadow; Engine::Get()->PushAsync( [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out}); on_complete(); }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &(updateBuf->merged), 0); updateBuf->merged.WaitToRead(); } void RowSparsePullResponse(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<char> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } const NDArray& stored = store_[master_key]; if (has_multi_precision_copy(type)) stored.WaitToRead(); CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const int num_bytes = mshadow::mshadow_sizeof(type.dtype); const int unit_size = unit_len * num_bytes; const char* data = static_cast<char *> (stored.data().dptr_); auto len = num_rows * unit_size; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_size; auto begin = (i - 1) * unit_size; auto end = i * unit_size; response.vals.segment(begin, end).CopyFrom(src, unit_size); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, 
response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) { store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype); } Engine::Get()->PushAsync( [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); TBlob rsp_data = rsp.data(); // copies or casts as appropriate ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext()); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); if (has_multi_precision_copy(type)) { CopyFromTo(stored, store_[master_key]); store_[master_key].WaitToRead(); } stored.WaitToRead(); server->Response(req_meta); } void DataHandleRowSparse(const DataHandleType 
type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server); return; } else { if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys; auto& updates = update_buf_[master_key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32); } if (num_rows == 0) { if (sync_mode_) { if (updates.request.empty()) { // reset to zeros int merged_dtype = has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype; updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype); } // else nothing to aggregate updates.request.push_back(req_meta); ApplyUpdates(type, master_key, &updates, server); } else { server->Response(req_meta); } } else { auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype); CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); AccumulateRowSparseGrads(type, recved, &updates); } updates.request.push_back(req_meta); ApplyUpdates(type, master_key, &updates, server); } } } else { // pull RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server); } } void DefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { ps::KVPairs<char> response; const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom 
response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } //SSP Xin Yao void SSPDefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { ps::KVPairs<char> response; int current_iter = req_meta.staleness; /* * SSP condition * the slowest one + _STALE <= current_iter * we can not pull data until */ if (ticks[key] + stale <= current_iter) { // Wait //wait for the slow workers catch up callbacks_[ticks[key]].push_back( [this, key, req_meta, req_data, response, server]() mutable { const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); }); return; } const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { CHECK_EQ(type.dtype, mshadow::kFloat32) << "Gradient compression is currently supported for fp32 only"; if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be 
deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)}; TShape dshape(ds, ds + 1); TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = update_buf_[key]; if (merged.merged.is_none()) { merged.merged = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.merged, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.merged += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(type, key, &merged, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(type, key, req_meta, req_data, server); } } void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, 
ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); server->Response(req_meta); if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); } else { auto &updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, &updates, server); } } else { DefaultStorageResponse(type, key, req_meta, req_data, server); } } //SSP Xin Yao void SSPDataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); int current_iter = req_meta.staleness; auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); //finished push server->Response(req_meta); //response push to workers if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); // After wait to read } else { auto &updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, &updates, server); //response push to workers } //Xin Yao workercount[current_iter] += 1; // For this iteration, add it for each push while (workercount[ticks[key]] == ps::NumWorkers()) { // For a given key, if the staleness has been passed number of workers, add one //trigger a cb of pull auto& cbs = callbacks_[ticks[key]]; for (const auto& cb : cbs) { cb(); } ticks[key] += 1; } } else { SSPDefaultStorageResponse(type, key, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; int stale; //let us see it KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ 
std::unordered_map<int, NDArray> store_; std::unordered_map<int, NDArray> store_realt_; /** * SSP controller */ std::unordered_map<int, Staleness> ticks; std::unordered_map<Staleness, int> workercount; std::unordered_map<Staleness, std::vector<Callback>> callbacks_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. */ std::unordered_map<int, UpdateBuf> update_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<char>* ps_server_; // whether to LOG verbose information bool log_verbose_; /* * \brief whether to use multi precision mode. * in multi precision mode, all weights are stored as float32. * any gradient received will be cast to float32 before accumulation and updating of weights. */ bool multi_precision_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
GrB_Matrix_nrows.c
//------------------------------------------------------------------------------ // GrB_Matrix_nrows: number of rows of a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GrB_Matrix_nrows // get the number of rows of a matrix ( GrB_Index *nrows, // matrix has nrows rows const GrB_Matrix A // matrix to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_nrows (&nrows, A)") ; GB_RETURN_IF_NULL (nrows) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; //-------------------------------------------------------------------------- // get the number of rows //-------------------------------------------------------------------------- (*nrows) = GB_NROWS (A) ; #pragma omp flush return (GrB_SUCCESS) ; }
GB_unop__atan_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__atan_fc32_fc32)
// op(A') function:  GB (_unop_tran__atan_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = catanf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = catanf (x) ;

// casting (A and C share a type here, so the "cast" is a plain copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = catanf (z) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__atan_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__atan_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
triangleCounting.h
// Global accumulator holding the total number of triangles found by the most
// recent call to triangleCounting().  Written inside the parallel region
// (via the final atomic merge) and read by outputTriangleCounting().
int64_t T = 0 ;

// Print the triangle count computed by the last call to triangleCounting().
// The graph argument is unused but kept for interface compatibility with the
// other benchmark kernels.
void outputTriangleCounting(graph *G)
{
    // Cast to long long: %lld requires a long long argument, and int64_t is
    // not guaranteed to be the same type as long long on every platform
    // (passing a mismatched type to printf is undefined behavior).
    printf("\nThe total number of Triangles = %lld\n", (long long) T);
}

// Count the triangles of G in parallel and store the total in the global T.
// For each vertex v, each ordered neighbor pair (u, w) with v < u < w is
// checked for the closing edge (w, u); each triangle is therefore counted
// exactly once.
void triangleCounting(graph *G)
{
    inittracking("triangleCounting.csv");
    T = 0;
    #pragma omp parallel
    {
        int64_t T_private = 0;   // per-thread triangle count, merged at the end
        node_t v;
#if defined(PARFOR_GUIDED)
        #pragma omp for schedule(guided, PAR_CHUNKSIZE)
#elif defined(PARFOR_DYNAMIC)
        #pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
#elif defined(TASKLOOP_DEFINED)
        // NOTE(review): a taskloop placed directly inside a parallel region
        // without #pragma omp single is created once per thread, so every
        // triangle would be counted NumThreads times -- confirm the intended
        // usage of this build configuration.
        #pragma omp taskloop num_tasks(NUM_TASKS)
#else
        #pragma omp for schedule(static)
#endif
        for (v = 0; v < G->numNodes; v ++)
        {
            edge_t u_idx;
            // --- for each neighbor u of v with u > v
            for (u_idx = G->begin[v]; u_idx < G->begin[v+1]; u_idx ++)
            {
                node_t u = G->node_idx [u_idx];
                if (u > v)
                {
                    edge_t w_idx;
                    // --- for each neighbor w of v with w > u
                    for (w_idx = G->begin[v]; w_idx < G->begin[v+1]; w_idx ++)
                    {
                        node_t w = G->node_idx [w_idx];
                        if (w > u)
                        {
                            edge_t s;
                            // --- scan w's adjacency list for u; finding it
                            //     closes the triangle (v, u, w)
                            for (s = G->begin[w]; s < G->begin[w+1]; s++)
                            {
                                node_t y = G->node_idx[s];
                                if (y == u)
                                {
                                    T_private = T_private + 1 ;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
        // --- merge the per-thread counts into the global total
        #pragma omp atomic
        T += T_private;
    }
    endtracking();
}
dynwave.c
//-----------------------------------------------------------------------------
//   dynwave.c
//
//   Project:  EPA SWMM5
//   Version:  5.1
//   Date:     03/20/14   (5.1.001)
//             03/28/14   (5.1.002)
//             09/15/14   (5.1.007)
//             03/19/15   (5.1.008)
//             08/01/16   (5.1.011)
//             05/10/18   (5.1.013)
//   Author:   L. Rossman (EPA)
//             M. Tryby (EPA)
//             R. Dickinson (CDM)
//
//   Dynamic wave flow routing functions.
//
//   This module solves the dynamic wave flow routing equations using
//   Picard Iterations (i.e., a method of successive approximations)
//   to solve the explicit form of the continuity and momentum equations
//   for conduits.
//
//   Build 5.1.002:
//   - Only non-ponded nodal surface area is saved for use in
//     surcharge algorithm.
//
//   Build 5.1.007:
//   - Node losses added to node outflow variable instead of treated
//     as a separate item when computing change in node flow volume.
//
//   Build 5.1.008:
//   - Module-specific constants moved here from project.c.
//   - Support added for user-specified minimum variable time step.
//   - Node crown elevations found here instead of in flowrout.c module.
//   - OpenMP use to parallelize findLinkFlows() & findNodeDepths().
//   - Bug in finding complete list of capacity limited links fixed.
//
//   Build 5.1.011:
//   - Added test for failed memory allocation.
//   - Fixed illegal array index bug for Ideal Pumps.
//
//   Build 5.1.013:
//   - Include omp.h protected against lack of compiler support for OpenMP.
//   - SurchargeMethod option used to decide how node surcharging is handled.
//   - Storage nodes allowed to pressurize if their surcharge depth > 0.
//   - Minimum flow needed to compute a Courant time step modified.
//
//   Build 5.1.014:
//   - updateNodeFlows() modified to subtract conduit evap. and seepage losses
//     from downstream node inflow instead of upstream node outflow.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE

#include "headers.h"
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)                                                           //(5.1.013)
#include <omp.h>
#endif

//-----------------------------------------------------------------------------
//     Constants
//-----------------------------------------------------------------------------
static const double MINTIMESTEP = 0.001;     // min. time step (sec)
static const double OMEGA       = 0.5;       // under-relaxation parameter
static const double DEFAULT_SURFAREA = 12.566;  // Min. nodal surface area (~4 ft diam.)
static const double DEFAULT_HEADTOL = 0.005;    // Default head tolerance (ft)
static const double EXTRAN_CROWN_CUTOFF = 0.96;     // crown cutoff for EXTRAN //(5.1.013)
static const double SLOT_CROWN_CUTOFF = 0.985257;   // crown cutoff for SLOT   //(5.1.013)
static const int    DEFAULT_MAXTRIALS = 8;   // Max. trials per time step

//-----------------------------------------------------------------------------
//  Data Structures
//-----------------------------------------------------------------------------
// Per-node working state used by the Picard iteration (parallel to Node[]).
typedef struct
{
    char    converged;                 // TRUE if iterations for a node done
    double  newSurfArea;               // current surface area (ft2)
    double  oldSurfArea;               // previous surface area (ft2)
    double  sumdqdh;                   // sum of dqdh from adjoining links
    double  dYdT;                      // change in depth w.r.t. time (ft/sec)
} TXnode;

//-----------------------------------------------------------------------------
//  Shared Variables
//-----------------------------------------------------------------------------
static double  VariableStep;           // size of variable time step (sec)
static TXnode* Xnode;                  // extended nodal information
static double  Omega;                  // actual under-relaxation parameter
static int     Steps;                  // number of Picard iterations

//-----------------------------------------------------------------------------
//  Function declarations
//-----------------------------------------------------------------------------
static void   initRoutingStep(void);
static void   initNodeStates(void);
static void   findBypassedLinks();
static void   findLimitedLinks();
static void   findLinkFlows(double dt);
static int    isTrueConduit(int link);
static void   findNonConduitFlow(int link, double dt);
static void   findNonConduitSurfArea(int link);
static double getModPumpFlow(int link, double q, double dt);
static void   updateNodeFlows(int link);

static int    findNodeDepths(double dt);
static void   setNodeDepth(int node, double dt);
static double getFloodedDepth(int node, int canPond, double dV, double yNew,
              double yMax, double dt);

static double getVariableStep(double maxStep);
static double getLinkStep(double tMin, int *minLink);
static double getNodeStep(double tMin, int *minNode);

//=============================================================================

void dynwave_init()
//
//  Input:   none
//  Output:  none
//  Purpose: initializes dynamic wave routing method.
//
{
    int i, j;
    double z;

    VariableStep = 0.0;
    Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode));
    if ( Xnode == NULL )
    {
        report_writeErrorMsg(ERR_MEMORY,
            " Not enough memory for dynamic wave routing.");
        return;
    }

    // --- initialize node surface areas & crown elev.
    for (i = 0; i < Nobjects[NODE]; i++ )
    {
        Xnode[i].newSurfArea = 0.0;
        Xnode[i].oldSurfArea = 0.0;
        Node[i].crownElev = Node[i].invertElev;
    }

    // --- initialize links & update node crown elevations
    //     (a node's crown is the highest soffit of any connected link)
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        j = Link[i].node1;
        z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull;
        Node[j].crownElev = MAX(Node[j].crownElev, z);
        j = Link[i].node2;
        z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull;
        Node[j].crownElev = MAX(Node[j].crownElev, z);
        Link[i].flowClass = DRY;
        Link[i].dqdh = 0.0;
    }

    // --- set crown cutoff for finding top width of closed conduits          //(5.1.013)
    if ( SurchargeMethod == SLOT ) CrownCutoff = SLOT_CROWN_CUTOFF;           //(5.1.013)
    else CrownCutoff = EXTRAN_CROWN_CUTOFF;                                   //(5.1.013)
}

//=============================================================================

void dynwave_close()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for dynamic wave routing method.
//
{
    FREE(Xnode);
}

//=============================================================================

void dynwave_validate()
//
//  Input:   none
//  Output:  none
//  Purpose: adjusts dynamic wave routing options.
//
{
    if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep;
    if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP;
    if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA;
    else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH);
    if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL;
    else HeadTol /= UCF(LENGTH);
    if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS;
}

//=============================================================================

double dynwave_getRoutingStep(double fixedStep)
//
//  Input:   fixedStep = user-supplied fixed time step (sec)
//  Output:  returns routing time step (sec)
//  Purpose: computes variable routing time step if applicable.
//
{
    // --- use user-supplied fixed step if variable step option turned off
    //     or if its smaller than the min. allowable variable time step
    if ( CourantFactor == 0.0 ) return fixedStep;
    if ( fixedStep < MINTIMESTEP ) return fixedStep;

    // --- at start of simulation (when current variable step is zero)
    //     use the minimum allowable time step
    if ( VariableStep == 0.0 )
    {
        VariableStep = MinRouteStep;
    }

    // --- otherwise compute variable step based on current flow solution
    else VariableStep = getVariableStep(fixedStep);

    // --- adjust step to be a multiple of a millisecond
    VariableStep = floor(1000.0 * VariableStep) / 1000.0;
    return VariableStep;
}

//=============================================================================

int dynwave_execute(double tStep)
//
//  Input:   links = array of topo sorted links indexes
//           tStep = time step (sec)
//  Output:  returns number of iterations used
//  Purpose: routes flows through drainage network over current time step.
//
{
    int converged;

    // --- initialize
    if ( ErrorCode ) return 0;
    Steps = 0;
    converged = FALSE;
    Omega = OMEGA;
    initRoutingStep();

    // --- keep iterating until convergence
    while ( Steps < MaxTrials )
    {
        // --- execute a routing step & check for nodal convergence
        initNodeStates();
        findLinkFlows(tStep);
        converged = findNodeDepths(tStep);
        Steps++;
        if ( Steps > 1 )
        {
            if ( converged ) break;

            // --- check if link calculations can be skipped in next step
            findBypassedLinks();
        }
    }
    if ( !converged ) NonConvergeCount++;

    // --- identify any capacity-limited conduits
    findLimitedLinks();
    return Steps;
}

//=============================================================================

void initRoutingStep()
//
//  Purpose: resets per-iteration node and link state at the start of a
//           routing time step.
//
{
    int i;
    for (i = 0; i < Nobjects[NODE]; i++)
    {
        Xnode[i].converged = FALSE;
        Xnode[i].dYdT = 0.0;
    }
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        Link[i].bypassed = FALSE;
        Link[i].surfArea1 = 0.0;
        Link[i].surfArea2 = 0.0;
    }

    // --- a2 preserves conduit area from solution at last time step
    for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1;
}

//=============================================================================

void initNodeStates()
//
//  Input:   none
//  Output:  none
//  Purpose: initializes node's surface area, inflow & outflow
//
{
    int i;

    for (i = 0; i < Nobjects[NODE]; i++)
    {
        // --- initialize nodal surface area
        if ( AllowPonding )
        {
            Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
        }
        else
        {
            Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);
        }

/*  ////  Removed for release 5.1.013.  ///                                    //(5.1.013)
        if ( Xnode[i].newSurfArea < MinSurfArea )
        {
            Xnode[i].newSurfArea = MinSurfArea;
        }
*/

        // --- initialize nodal inflow & outflow
        //     (node losses count as outflow; lateral flow is split by sign)
        Node[i].inflow = 0.0;
        Node[i].outflow = Node[i].losses;
        if ( Node[i].newLatFlow >= 0.0 )
        {
            Node[i].inflow += Node[i].newLatFlow;
        }
        else
        {
            Node[i].outflow -= Node[i].newLatFlow;
        }
        Xnode[i].sumdqdh = 0.0;
    }
}

//=============================================================================

void findBypassedLinks()
//
//  Purpose: flags links whose end nodes have both converged so their flow
//           computation can be skipped in the next iteration.
//
{
    int i;
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        if ( Xnode[Link[i].node1].converged &&
             Xnode[Link[i].node2].converged )
             Link[i].bypassed = TRUE;
        else Link[i].bypassed = FALSE;
    }
}

//=============================================================================

void findLimitedLinks()
//
//  Input:   none
//  Output:  none
//  Purpose: determines if a conduit link is capacity limited.
//
{
    int    j, n1, n2, k;
    double h1, h2;

    for (j = 0; j < Nobjects[LINK]; j++)
    {
        // ---- check only non-dummy conduit links
        if ( !isTrueConduit(j) ) continue;

        // --- check that upstream end is full
        k = Link[j].subIndex;
        Conduit[k].capacityLimited = FALSE;
        if ( Conduit[k].a1 >= Link[j].xsect.aFull )
        {
            // --- check if HGL slope > conduit slope
            n1 = Link[j].node1;
            n2 = Link[j].node2;
            h1 = Node[n1].newDepth + Node[n1].invertElev;
            h2 = Node[n2].newDepth + Node[n2].invertElev;
            if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length )
                Conduit[k].capacityLimited = TRUE;
        }
    }
}

//=============================================================================

void findLinkFlows(double dt)
//
//  Purpose: computes new flows in all links; conduit flows are found in
//           parallel, then node inflow/outflow totals are updated serially.
//
{
    int i;

    // --- find new flow in each non-dummy conduit
#pragma omp parallel num_threads(NumThreads)
{
    #pragma omp for
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) && !Link[i].bypassed )
            dwflow_findConduitFlow(i, Steps, Omega, dt);
    }
}

    // --- update inflow/outflows for nodes attached to non-dummy conduits
    //     (done serially since several links can share a node)
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) ) updateNodeFlows(i);
    }

    // --- find new flows for all dummy conduits, pumps & regulators
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( !isTrueConduit(i) )
        {
            if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
            updateNodeFlows(i);
        }
    }
}

//=============================================================================

int isTrueConduit(int j)
//
//  Purpose: returns TRUE if link j is a conduit with a real (non-dummy)
//           cross-section shape.
//
{
    return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}

//=============================================================================

void findNonConduitFlow(int i, double dt)
//
//  Input:   i = link index
//           dt = time step (sec)
//  Output:  none
//  Purpose: finds new flow in a non-conduit-type link
//
{
    double qLast;                      // previous link flow (cfs)
    double qNew;                       // new link flow (cfs)

    // --- get link flow from last iteration
    qLast = Link[i].newFlow;
    Link[i].dqdh = 0.0;

    // --- get new inflow to link from its upstream node
    //     (link_getInflow returns 0 if flap gate closed or pump is offline)
    qNew = link_getInflow(i);
    if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt);

    // --- find surface area at each end of link
    findNonConduitSurfArea(i);

    // --- apply under-relaxation with flow from previous iteration;
    // --- do not allow flow to change direction without first being 0
    if ( Steps > 0 && Link[i].type != PUMP )
    {
        qNew = (1.0 - Omega) * qLast + Omega * qNew;
        if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew);
    }
    Link[i].newFlow = qNew;
}

//=============================================================================

double getModPumpFlow(int i, double q, double dt)
//
//  Input:   i = link index
//           q = pump flow from pump curve (cfs)
//           dt = time step (sec)
//  Output:  returns modified pump flow rate (cfs)
//  Purpose: modifies pump curve pumping rate depending on amount of water
//           available at pump's inlet node.
//
{
    int    j = Link[i].node1;          // pump's inlet node index
    int    k = Link[i].subIndex;       // pump's index
    double newNetInflow;               // inflow - outflow rate (cfs)
    double netFlowVolume;              // inflow - outflow volume (ft3)
    double y;                          // node depth (ft)

    if ( q == 0.0 ) return q;

    // --- case where inlet node is a storage node:
    //     prevent node volume from going negative
    if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt);

    // --- case where inlet is a non-storage node
    switch ( Pump[k].type )
    {
      // --- for Type1 pump, a volume is computed for inlet node,
      //     so make sure it doesn't go negative
      case TYPE1_PUMP:
        return node_getMaxOutflow(j, q, dt);

      // --- for other types of pumps, if pumping rate would make depth
      //     at upstream node negative, then set pumping rate = inflow
      //     (deliberate fall-through to "return q" when depth stays positive)
      case TYPE2_PUMP:
      case TYPE4_PUMP:
      case TYPE3_PUMP:
         newNetInflow = Node[j].inflow - Node[j].outflow - q;
         netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt;
         y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea;
         if ( y <= 0.0 ) return Node[j].inflow;
    }
    return q;
}

//=============================================================================

void findNonConduitSurfArea(int i)
//
//  Input:   i = link index
//  Output:  none
//  Purpose: finds the surface area contributed by a non-conduit
//           link to its upstream and downstream nodes.
//
{
    if ( Link[i].type == ORIFICE )
    {
        Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.;
    }

    // --- no surface area for weirs to maintain SWMM 4 compatibility
    else Link[i].surfArea1 = 0.0;

    Link[i].surfArea2 = Link[i].surfArea1;

    // --- no area contribution at a critical-flow end or a storage node
    if ( Link[i].flowClass == UP_CRITICAL ||
        Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0;
    if ( Link[i].flowClass == DN_CRITICAL ||
        Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0;
}

//=============================================================================

void updateNodeFlows(int i)
//
//  Input:   i = link index
//  Output:  none
//  Purpose: updates cumulative inflow & outflow at link's end nodes.
//
{
    int    k;
    int    barrels = 1;
    int    n1 = Link[i].node1;
    int    n2 = Link[i].node2;
    double q  = Link[i].newFlow;
    double uniformLossRate = 0.0;

    // --- compute any uniform seepage loss from a conduit
    if ( Link[i].type == CONDUIT )
    {
        k = Link[i].subIndex;
        uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate;
        barrels = Conduit[k].barrels;
        uniformLossRate *= barrels;                                            //(5.1.014)
    }

    // --- update total inflow & outflow at upstream/downstream nodes
    //     (losses are subtracted from the downstream node's inflow)
    if ( q >= 0.0 )
    {
        Node[n1].outflow += q;                                                 //(5.1.014)
        Node[n2].inflow  += q - uniformLossRate;                               //(5.1.014)
    }
    else
    {
        Node[n1].inflow   -= q + uniformLossRate;                              //(5.1.014)
        Node[n2].outflow  -= q;                                                //(5.1.014)
    }

    // --- add surf. area contributions to upstream/downstream nodes
    Xnode[Link[i].node1].newSurfArea += Link[i].surfArea1 * barrels;
    Xnode[Link[i].node2].newSurfArea += Link[i].surfArea2 * barrels;

    // --- update summed value of dqdh at each end node
    Xnode[Link[i].node1].sumdqdh += Link[i].dqdh;
    if ( Link[i].type == PUMP )
    {
        k = Link[i].subIndex;
        if ( Pump[k].type != TYPE4_PUMP )
        {
            Xnode[n2].sumdqdh += Link[i].dqdh;
        }
    }
    else Xnode[n2].sumdqdh += Link[i].dqdh;
}

//=============================================================================

int findNodeDepths(double dt)
//
//  Purpose: computes a new depth at every non-outfall node (in parallel) and
//           reports whether all depth changes are within tolerance.
//
{
    int i;
    int converged;                     // convergence flag
    double yOld;                       // previous node depth (ft)

    // --- compute outfall depths based on flow in connecting link
    for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i);

    // --- compute new depth for all non-outfall nodes and determine if
    //     depth change from previous iteration is below tolerance
    // NOTE(review): several threads may store FALSE into the shared
    // "converged" flag concurrently; all writers store the same value --
    // confirm this is the intended (benign) pattern.
    converged = TRUE;
#pragma omp parallel num_threads(NumThreads)
{
    #pragma omp for private(yOld)
    for ( i = 0; i < Nobjects[NODE]; i++ )
    {
        if ( Node[i].type == OUTFALL ) continue;
        yOld = Node[i].newDepth;
        setNodeDepth(i, dt);
        Xnode[i].converged = TRUE;
        if ( fabs(yOld - Node[i].newDepth) > HeadTol )
        {
            converged = FALSE;
            Xnode[i].converged = FALSE;
        }
    }
}
    return converged;
}

//=============================================================================

void setNodeDepth(int i, double dt)
//
//  Input:   i  = node index
//           dt = time step (sec)
//  Output:  none
//  Purpose: sets depth at non-outfall node after current time step.
//
{
    int     canPond;                   // TRUE if node can pond overflows
    int     isPonded;                  // TRUE if node is currently ponded
    int     isSurcharged = FALSE;      // TRUE if node is surcharged          //(5.1.013)
    double  dQ;                        // inflow minus outflow at node (cfs)
    double  dV;                        // change in node volume (ft3)
    double  dy;                        // change in node depth (ft)
    double  yMax;                      // max. depth at node (ft)
    double  yOld;                      // node depth at previous time step (ft)
    double  yLast;                     // previous node depth (ft)
    double  yNew;                      // new node depth (ft)
    double  yCrown;                    // depth to node crown (ft)
    double  surfArea;                  // node surface area (ft2)
    double  denom;                     // denominator term
    double  corr;                      // correction factor
    double  f;                         // relative surcharge depth

    // --- see if node can pond water above it
    canPond = (AllowPonding && Node[i].pondedArea > 0.0);
    isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth);

    // --- initialize values
    yCrown = Node[i].crownElev - Node[i].invertElev;
    yOld = Node[i].oldDepth;
    yLast = Node[i].newDepth;
    Node[i].overflow = 0.0;
    surfArea = Xnode[i].newSurfArea;
    surfArea = MAX(surfArea, MinSurfArea);                                     //(5.1.013)

    // --- determine average net flow volume into node over the time step
    dQ = Node[i].inflow - Node[i].outflow;
    dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt;

////  Following code segment added to release 5.1.013.  ////                   //(5.1.013)
    // --- determine if node is EXTRAN surcharged
    if (SurchargeMethod == EXTRAN)
    {
        // --- ponded nodes don't surcharge
        if (isPonded) isSurcharged = FALSE;

        // --- closed storage units that are full are in surcharge
        else if (Node[i].type == STORAGE)
        {
            isSurcharged = (Node[i].surDepth > 0.0 &&
                            yLast > Node[i].fullDepth);
        }

        // --- surcharge occurs when node depth exceeds top of its highest link
        else isSurcharged = (yCrown > 0.0 && yLast > yCrown);
    }
/////////////////////////////////////////////////////////////

    // --- if node not surcharged, base depth change on surface area
    if (!isSurcharged)                                                         //(5.1.013)
    {
        dy = dV / surfArea;
        yNew = yOld + dy;

        // --- save non-ponded surface area for use in surcharge algorithm
        if ( !isPonded ) Xnode[i].oldSurfArea = surfArea;

        // --- apply under-relaxation to new depth estimate
        if ( Steps > 0 )
        {
            yNew = (1.0 - Omega) * yLast + Omega * yNew;
        }

        // --- don't allow a ponded node to drop much below full depth
        if ( isPonded && yNew < Node[i].fullDepth )
            yNew = Node[i].fullDepth - FUDGE;
    }

    // --- if node surcharged, base depth change on dqdh
    //     NOTE: depth change is w.r.t depth from previous
    //     iteration; also, do not apply under-relaxation.
    else
    {
        // --- apply correction factor for upstream terminal nodes
        corr = 1.0;
        if ( Node[i].degree < 0 ) corr = 0.6;

        // --- allow surface area from last non-surcharged condition
        //     to influence dqdh if depth close to crown depth
        denom = Xnode[i].sumdqdh;
        if ( yLast < 1.25 * yCrown )
        {
            f = (yLast - yCrown) / yCrown;
            denom += (Xnode[i].oldSurfArea/dt -
                      Xnode[i].sumdqdh) * exp(-15.0 * f);
        }

        // --- compute new estimate of node depth
        if ( denom == 0.0 ) dy = 0.0;
        else dy = corr * dQ / denom;
        yNew = yLast + dy;
        if ( yNew < yCrown ) yNew = yCrown - FUDGE;

        // --- don't allow a newly ponded node to rise much above full depth
        if ( canPond && yNew > Node[i].fullDepth )
            yNew = Node[i].fullDepth + FUDGE;
    }

    // --- depth cannot be negative
    if ( yNew < 0 ) yNew = 0.0;

    // --- determine max. non-flooded depth
    yMax = Node[i].fullDepth;
    if ( canPond == FALSE ) yMax += Node[i].surDepth;

    // --- find flooded depth & volume
    if ( yNew > yMax )
    {
        yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt);
    }
    else Node[i].newVolume = node_getVolume(i, yNew);

    // --- compute change in depth w.r.t. time
    Xnode[i].dYdT = fabs(yNew - yOld) / dt;

    // --- save new depth for node
    Node[i].newDepth = yNew;
}

//=============================================================================

double getFloodedDepth(int i, int canPond, double dV, double yNew,
                       double yMax, double dt)
//
//  Input:   i  = node index
//           canPond = TRUE if water can pond over node
//           dV = change in volume over time step (ft3)
//           yNew = current depth at node (ft)
//           yMax = max. depth at node before ponding (ft)
//           dt = time step (sec)
//  Output:  returns depth at node when flooded (ft)
//  Purpose: computes depth, volume and overflow for a flooded node.
//
{
    if ( canPond == FALSE )
    {
        Node[i].overflow = dV / dt;
        Node[i].newVolume = Node[i].fullVolume;
        yNew = yMax;
    }
    else
    {
        Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume);
        Node[i].overflow = (Node[i].newVolume -
            MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt;
    }
    if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0;
    return yNew;
}

//=============================================================================

double getVariableStep(double maxStep)
//
//  Input:   maxStep = user-supplied max. time step (sec)
//  Output:  returns time step (sec)
//  Purpose: finds time step that satisfies stability criterion but
//           is no greater than the user-supplied max. time step.
//
{
    int    minLink = -1;                // index of link w/ min. time step
    int    minNode = -1;                // index of node w/ min. time step
    double tMin;                        // allowable time step (sec)
    double tMinLink;                    // allowable time step for links (sec)
    double tMinNode;                    // allowable time step for nodes (sec)

    // --- find stable time step for links & then nodes
    tMin = maxStep;
    tMinLink = getLinkStep(tMin, &minLink);
    tMinNode = getNodeStep(tMinLink, &minNode);

    // --- use smaller of the link and node time step
    tMin = tMinLink;
    if ( tMinNode < tMin )
    {
        tMin = tMinNode ;
        minLink = -1;
    }

    // --- update count of times the minimum node or link was critical
    stats_updateCriticalTimeCount(minNode, minLink);

    // --- don't let time step go below an absolute minimum
    if ( tMin < MinRouteStep ) tMin = MinRouteStep;
    return tMin;
}

//=============================================================================

double getLinkStep(double tMin, int *minLink)
//
//  Input:   tMin = critical time step found so far (sec)
//  Output:  minLink = index of link with critical time step;
//           returns critical time step (sec)
//  Purpose: finds critical time step for conduits based on Courant criterion.
//
{
    int    i;                           // link index
    int    k;                           // conduit index
    double q;                           // conduit flow (cfs)
    double t;                           // time step (sec)
    double tLink = tMin;                // critical link time step (sec)

    // --- examine each conduit link
    for ( i = 0; i < Nobjects[LINK]; i++ )
    {
        if ( Link[i].type == CONDUIT )
        {
            // --- skip conduits with negligible flow, area or Fr
            k = Link[i].subIndex;
            q = fabs(Link[i].newFlow) / Conduit[k].barrels;
            if ( q <= FUDGE                                                    //(5.1.013)
            ||   Conduit[k].a1 <= FUDGE
            ||   Link[i].froude <= 0.01
               ) continue;

            // --- compute time step to satisfy Courant condition
            t = Link[i].newVolume / Conduit[k].barrels / q;
            t = t * Conduit[k].modLength / link_getLength(i);
            t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor;

            // --- update critical link time step
            if ( t < tLink )
            {
                tLink = t;
                *minLink = i;
            }
        }
    }
    return tLink;
}

//=============================================================================

double getNodeStep(double tMin, int *minNode)
//
//  Input:   tMin = critical time step found so far (sec)
//  Output:  minNode = index of node with critical time step;
//           returns critical time step (sec)
//  Purpose: finds critical time step for nodes based on max. allowable
//           projected change in depth.
//
{
    int    i;                           // node index
    double maxDepth;                    // max. depth allowed at node (ft)
    double dYdT;                        // change in depth per unit time (ft/sec)
    double t1;                          // time needed to reach depth limit (sec)
    double tNode = tMin;                // critical node time step (sec)

    // --- find smallest time so that estimated change in nodal depth
    //     does not exceed safety factor * maxdepth
    for ( i = 0; i < Nobjects[NODE]; i++ )
    {
        // --- see if node can be skipped
        if ( Node[i].type == OUTFALL ) continue;
        if ( Node[i].newDepth <= FUDGE) continue;
        if ( Node[i].newDepth  + FUDGE >=
             Node[i].crownElev - Node[i].invertElev ) continue;

        // --- define max. allowable depth change using crown elevation
        maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25;
        if ( maxDepth < FUDGE ) continue;
        dYdT = Xnode[i].dYdT;
        if (dYdT < FUDGE ) continue;

        // --- compute time to reach max. depth & compare with critical time
        t1 = maxDepth / dYdT;
        if ( t1 < tNode )
        {
            tNode = t1;
            *minNode = i;
        }
    }
    return tNode;
}
omp_task_untied.c
<ompts:test>
<ompts:testdescription>Test for untied clause. First generate a set of tasks and pause it immediately. Then we resume half of them and check whether they are scheduled by different threads</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp task untied</ompts:directive>
<ompts:dependences>omp taskwait</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"

int <ompts:testcode:functionname>omp_task_untied</ompts:testcode:functionname>(FILE * logFile){
    <ompts:orphan:vars>
    int i;
    int count;
    /* start_tid[i] = thread that first ran task i;
       current_tid[i] = thread that ran task i after the taskwait */
    int start_tid[NUM_TASKS];
    int current_tid[NUM_TASKS];
    </ompts:orphan:vars>
    count = 0;

    /*initialization*/
    for (i=0; i< NUM_TASKS; i++){
        start_tid[i]=0;
        current_tid[i]=0;
    }

    #pragma omp parallel firstprivate(i)
    {
        #pragma omp single
        {
            for (i = 0; i < NUM_TASKS; i++) {
                <ompts:orphan>
                int myi = i;
                #pragma omp task <ompts:check>untied</ompts:check>
                {
                    my_sleep(SLEEPTIME);
                    start_tid[myi] = omp_get_thread_num();
                    current_tid[myi] = omp_get_thread_num();
                    #pragma omp taskwait
                    <ompts:check>if((start_tid[myi] %2) !=0){</ompts:check>
                    my_sleep(SLEEPTIME);
                    current_tid[myi] = omp_get_thread_num();
                    <ompts:check>
                    } /* end of if */
                    else {
                       current_tid[myi] = omp_get_thread_num();
                    }
                    </ompts:check>
                } /*end of omp task */
                </ompts:orphan>
            } /* end of for */
        } /* end of single */
    } /* end of parallel */
    /* test passes (returns 0) if at least one untied task resumed on a
       different thread, i.e. current_tid != start_tid for some task */
    for (i=0;i<NUM_TASKS; i++) {
        printf("start_tid[%d]=%d, current_tid[%d]=%d\n",i, start_tid[i], i , current_tid[i]);
        if (current_tid[i] == start_tid[i])
            count++;
    }
    return (count<NUM_TASKS);
}
</ompts:testcode>
</ompts:test>
GB_binop__ge_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int64) // A.*B function (eWiseMult): GB (_AemultB_01__ge_int64) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int64) // A.*B function (eWiseMult): GB (_AemultB_03__ge_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int64) // A*D function (colscale): GB (_AxD__ge_int64) // D*A function (rowscale): GB (_DxB__ge_int64) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int64) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int64) // C=scalar+B GB (_bind1st__ge_int64) // C=scalar+B' GB (_bind1st_tran__ge_int64) // C=A+scalar GB (_bind2nd__ge_int64) // C=A'+scalar GB (_bind2nd_tran__ge_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT64 || GxB_NO_GE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ge_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ge_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ge_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
subopt.c
/* * suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker * * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <ctype.h> #include <string.h> #include <math.h> #include "ViennaRNA/fold.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/datastructures/lists.h" #include "ViennaRNA/eval.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/subopt.h" /* hack */ #include "ViennaRNA/color_output.inc" #ifdef _OPENMP #include <omp.h> #endif #define true 1 #define false 0 #ifndef ON_SAME_STRAND #define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C))) #endif /** * @brief Sequence interval stack element used in subopt.c */ typedef struct INTERVAL { int i; int j; int array_flag; } INTERVAL; typedef struct { char *structure; LIST *Intervals; int partial_energy; int is_duplex; /* int best_energy; */ /* best attainable energy */ } STATE; typedef struct { LIST *Intervals; LIST *Stack; int nopush; } subopt_env; struct old_subopt_dat { unsigned long max_sol; unsigned long n_sol; SOLUTION *SolutionList; FILE *fp; }; /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int subopt_sorted = 0; /* output sorted by energy */ PUBLIC int density_of_states[MAXDOS + 1]; PUBLIC double print_energy = 9999; /* printing threshold for use with logML */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE int backward_compat = 0; PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP #pragma omp 
threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PRIVATE SOLUTION * wrap_subopt(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp); #endif PRIVATE void make_pair(int i, int j, STATE *state); /* mark a gquadruplex in the resulting dot-bracket structure */ PRIVATE void make_gquad(int i, int L, int l[3], STATE *state); PRIVATE INTERVAL * make_interval(int i, int j, int ml); PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length); PRIVATE STATE * copy_state(STATE *state); PRIVATE void print_state(STATE *state); PRIVATE void UNUSED print_stack(LIST *list); PRIVATE LIST * make_list(void); PRIVATE void push(LIST *list, void *data); PRIVATE void *pop(LIST *list); PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state); PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env); PRIVATE void free_interval_node(INTERVAL *node); PRIVATE void free_state_node(STATE *node); PRIVATE void push_back(LIST *Stack, STATE *state); PRIVATE char * get_structure(STATE *state); PRIVATE int compare(const void *solution1, const void *solution2); PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp); PRIVATE void repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void old_subopt_print(const char *structure, float energy, void *data); PRIVATE void old_subopt_store(const char *structure, float energy, void *data); /* ################################# # BEGIN OF 
FUNCTION DEFINITIONS # ################################# */ /*---------------------------------------------------------------------------*/ /*List routines--------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE void make_pair(int i, int j, STATE *state) { state->structure[i - 1] = '('; state->structure[j - 1] = ')'; } PRIVATE void make_gquad(int i, int L, int l[3], STATE *state) { int x; for (x = 0; x < L; x++) { state->structure[i - 1 + x] = '+'; state->structure[i - 1 + x + L + l[0]] = '+'; state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+'; state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+'; } } /*---------------------------------------------------------------------------*/ PRIVATE INTERVAL * make_interval(int i, int j, int array_flag) { INTERVAL *interval; interval = lst_newnode(sizeof(INTERVAL)); interval->i = i; interval->j = j; interval->array_flag = array_flag; return interval; } /*---------------------------------------------------------------------------*/ PRIVATE void free_interval_node(INTERVAL *node) { lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE void free_state_node(STATE *node) { free(node->structure); if (node->Intervals) lst_kill(node->Intervals, lst_freenode); lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length) { STATE *state; state = lst_newnode(sizeof(STATE)); if (Intervals) state->Intervals = Intervals; else state->Intervals = lst_init(); if (structure) { state->structure = structure; } else { int i; state->structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) state->structure[i] = '.'; } state->partial_energy = partial_energy; return state; } 
/*---------------------------------------------------------------------------*/ PRIVATE STATE * copy_state(STATE *state) { STATE *new_state; void *after; INTERVAL *new_interval, *next; new_state = lst_newnode(sizeof(STATE)); new_state->Intervals = lst_init(); new_state->partial_energy = state->partial_energy; /* new_state->best_energy = state->best_energy; */ if (state->Intervals->count) { after = LST_HEAD(new_state->Intervals); for (next = lst_first(state->Intervals); next; next = lst_next(next)) { new_interval = lst_newnode(sizeof(INTERVAL)); *new_interval = *next; lst_insertafter(new_state->Intervals, new_interval, after); after = new_interval; } } new_state->structure = strdup(state->structure); if (!new_state->structure) vrna_message_error("out of memory"); return new_state; } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_state(STATE *state) { INTERVAL *next; if (state->Intervals->count) { printf("%d intervals:\n", state->Intervals->count); for (next = lst_first(state->Intervals); next; next = lst_next(next)) printf("[%d,%d],%d ", next->i, next->j, next->array_flag); printf("\n"); } printf("partial structure: %s\n", state->structure); printf("\n"); printf(" partial_energy: %d\n", state->partial_energy); /* printf(" best_energy: %d\n", state->best_energy); */ (void)fflush(stdout); } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_stack(LIST *list) { void *rec; printf("================\n"); printf("%d states\n", list->count); for (rec = lst_first(list); rec; rec = lst_next(rec)) { printf("state-----------\n"); print_state(rec); } printf("================\n"); } /*---------------------------------------------------------------------------*/ PRIVATE LIST * make_list(void) { return lst_init(); } /*---------------------------------------------------------------------------*/ PRIVATE void push(LIST *list, void *data) { 
lst_insertafter(list, data, LST_HEAD(list)); } /* PRIVATE void */ /* push_stack(STATE *state) { */ /* keep the stack sorted by energy */ /* STATE *after, *next; */ /* nopush = false; */ /* next = after = LST_HEAD(Stack); */ /* while ( next = lst_next(next)) { */ /* if ( next->best_energy >= state->best_energy ) break; */ /* after = next; */ /* } */ /* lst_insertafter(Stack, state, after); */ /* } */ /*---------------------------------------------------------------------------*/ PRIVATE void * pop(LIST *list) { void *data; data = lst_deletenext(list, LST_HEAD(list)); return data; } /*---------------------------------------------------------------------------*/ /*auxiliary routines---------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state) { /* evaluation of best possible energy attainable within remaining intervals */ register int sum; INTERVAL *next; vrna_md_t *md; vrna_mx_mfe_t *matrices; int *indx; md = &(vc->params->model_details); matrices = vc->matrices; indx = vc->jindx; sum = state->partial_energy; /* energy of already found elements */ for (next = lst_first(state->Intervals); next; next = lst_next(next)) { if (next->array_flag == 0) sum += (md->circ) ? 
matrices->Fc : matrices->f5[next->j]; else if (next->array_flag == 1) sum += matrices->fML[indx[next->j] + next->i]; else if (next->array_flag == 2) sum += matrices->c[indx[next->j] + next->i]; else if (next->array_flag == 3) sum += matrices->fM1[indx[next->j] + next->i]; else if (next->array_flag == 4) sum += matrices->fc[next->i]; else if (next->array_flag == 5) sum += matrices->fc[next->j]; else if (next->array_flag == 6) sum += matrices->ggg[indx[next->j] + next->i]; } return sum; } /*---------------------------------------------------------------------------*/ PRIVATE void push_back(LIST *Stack, STATE *state) { push(Stack, copy_state(state)); return; } /*---------------------------------------------------------------------------*/ PRIVATE char * get_structure(STATE *state) { char *structure; structure = strdup(state->structure); return structure; } /*---------------------------------------------------------------------------*/ PRIVATE int compare(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return strcmp(((SOLUTION *)solution1)->structure, ((SOLUTION *)solution2)->structure); } /*---------------------------------------------------------------------------*/ PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp) /* prints stuff */ { SOLUTION *sol; for (sol = SL; sol->structure != NULL; sol++) { char *e_string = vrna_strdup_printf(" %6.2f", sol->energy); print_structure(fp, sol->structure, e_string); free(e_string); } } PRIVATE STATE * derive_new_state(int i, int j, STATE *s, int e, int flag) { STATE *s_new = copy_state(s); INTERVAL *ival = make_interval(i, j, flag); push(s_new->Intervals, ival); s_new->partial_energy += e; return s_new; } PRIVATE void fork_state(int i, int j, STATE *s, int e, int flag, subopt_env *env) { STATE *s_new = derive_new_state(i, j, s, e, flag); push(env->Stack, s_new); 
env->nopush = false; } PRIVATE void fork_int_state(int i, int j, int p, int q, STATE *s, int e, subopt_env *env) { STATE *s_new = derive_new_state(p, q, s, e, 2); make_pair(i, j, s_new); make_pair(p, q, s_new); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_state_pair(int i, int j, STATE *s, int e, subopt_env *env) { STATE *new_state; new_state = copy_state(s); make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states_pair(int i, int j, int k, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i + 1, k - 1, flag1); interval2 = make_interval(k, j - 1, flag2); if (k - i < j - k) { /* push larger interval first */ push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states(int i, int j, int p, int q, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i, j, flag1); interval2 = make_interval(p, q, flag2); if ((j - i) < (q - p)) { push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } /*---------------------------------------------------------------------------*/ /* start of subopt backtracking ---------------------------------------------*/ /*---------------------------------------------------------------------------*/ PUBLIC SOLUTION * vrna_subopt(vrna_fold_compound_t *vc, int delta, int sorted, FILE *fp) { struct 
old_subopt_dat data; data.SolutionList = NULL; data.max_sol = 128; data.n_sol = 0; data.fp = fp; if (vc) { /* SolutionList stores the suboptimal structures found */ data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION)); /* end initialize ------------------------------------------------------- */ if (fp) { float min_en; char *SeQ, *energies = NULL; if (vc->cutpoint > 0) min_en = vrna_mfe_dimer(vc, NULL); else min_en = vrna_mfe(vc, NULL); SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint); energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.); print_structure(fp, SeQ, energies); free(SeQ); free(energies); vrna_mx_mfe_free(vc); } /* call subopt() */ vrna_subopt_cb(vc, delta, (!sorted && fp) ? &old_subopt_print : &old_subopt_store, (void *)&data); if (sorted) { /* sort structures by energy */ if (data.n_sol > 0) qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare); if (fp) make_output(data.SolutionList, vc->cutpoint, fp); } if (fp) { /* we've printed everything -- free solutions */ SOLUTION *sol; for (sol = data.SolutionList; sol->structure != NULL; sol++) free(sol->structure); free(data.SolutionList); data.SolutionList = NULL; } } return data.SolutionList; } PUBLIC void vrna_subopt_cb(vrna_fold_compound_t *vc, int delta, vrna_subopt_callback *cb, void *data) { subopt_env *env; STATE *state; INTERVAL *interval; int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular, threshold, cp; double structure_energy, min_en, eprint; char *struc, *structure; float correction; vrna_param_t *P; vrna_md_t *md; int minimal_energy; int Fc; int *f5; vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID); length = vc->length; cp = vc->cutpoint; P = vc->params; md = &(P->model_details); /* do mfe folding to get fill arrays and get ground state energy */ /* in case dangles is neither 0 or 2, set dangles=2 while folding */ circular = md->circ; logML = md->logML; old_dangles = 
dangle_model = md->dangles; if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */ md->uniq_ML = 1; /* temporarily set dangles to 2 if necessary */ if ((md->dangles != 0) && (md->dangles != 2)) md->dangles = 2; struc = (char *)vrna_alloc(sizeof(char) * (length + 1)); if (circular) { min_en = vrna_mfe(vc, struc); Fc = vc->matrices->Fc; f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } else { min_en = vrna_mfe_dimer(vc, struc); f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } free(struc); eprint = print_energy + min_en; correction = (min_en < 0) ? -0.1 : 0.1; /* Initialize ------------------------------------------------------------ */ maxlevel = 0; count = 0; partial_energy = 0; /* Initialize the stack ------------------------------------------------- */ minimal_energy = (circular) ? Fc : f5[length]; threshold = minimal_energy + delta; if (threshold >= INF) { vrna_message_warning("Energy range too high, limiting to reasonable value"); threshold = INF - EMAX; } /* init env data structure */ env = (subopt_env *)vrna_alloc(sizeof(subopt_env)); env->Stack = NULL; env->nopush = true; env->Stack = make_list(); /* anchor */ env->Intervals = make_list(); /* initial state: */ interval = make_interval(1, length, 0); /* interval [1,length,0] */ push(env->Intervals, interval); env->nopush = false; state = make_state(env->Intervals, NULL, partial_energy, 0, length); /* state->best_energy = minimal_energy; */ push(env->Stack, state); env->nopush = false; /* end initialize ------------------------------------------------------- */ while (1) { /* forever, til nothing remains on stack */ maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel); if (LST_EMPTY(env->Stack)) { /* we are done! 
clean up and quit */ /* fprintf(stderr, "maxlevel: %d\n", maxlevel); */ lst_kill(env->Stack, free_state_node); cb(NULL, 0, data); /* NULL (last time to call callback function */ break; } /* pop the last element ---------------------------------------------- */ state = pop(env->Stack); /* current state to work with */ if (LST_EMPTY(state->Intervals)) { int e; /* state has no intervals left: we got a solution */ count++; structure = get_structure(state); structure_energy = state->partial_energy / 100.; #ifdef CHECK_ENERGY structure_energy = vrna_eval_structure(vc, structure); if (!logML) if ((double)(state->partial_energy / 100.) != structure_energy) { vrna_message_error("%s %6.2f %6.2f", structure, state->partial_energy / 100., structure_energy); exit(1); } #endif if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */ structure_energy = vrna_eval_structure(vc, structure); e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */ if (e > MAXDOS) e = MAXDOS; density_of_states[e]++; if (structure_energy <= eprint) { char *outstruct = vrna_cut_point_insert(structure, cp); cb((const char *)outstruct, structure_energy, data); free(outstruct); } free(structure); } else { /* get (and remove) next interval of state to analyze */ interval = pop(state->Intervals); scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env); free_interval_node(interval); /* free the current interval */ } free_state_node(state); /* free the current state */ } /* end of while (1) */ /* cleanup memory */ free(env); } PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env) { /* real backtrack routine */ /* array_flag = 0: trace back in f5-array */ /* array_flag = 1: trace back in fML-array */ /* array_flag = 2: trace back in repeat() */ /* array_flag = 3: trace back in fM1-array */ STATE *new_state, *temp_state; INTERVAL *new_interval; vrna_param_t 
*P; vrna_md_t *md; register int k, fi, cij, ij; register int type; register int dangle_model; register int noLP; int element_energy, best_energy; int *fc, *f5, *c, *fML, *fM1, *ggg; int FcH, FcI, FcM, *fM2; int length, *indx, *rtype, circular, with_gquad, turn, cp; char *ptype; short *S1; unsigned char *hard_constraints, hc_decompose; vrna_hc_t *hc; vrna_sc_t *sc; length = vc->length; cp = vc->cutpoint; indx = vc->jindx; ptype = vc->ptype; S1 = vc->sequence_encoding; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); dangle_model = md->dangles; noLP = md->noLP; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; fc = vc->matrices->fc; f5 = vc->matrices->f5; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; FcH = vc->matrices->FcH; FcI = vc->matrices->FcI; FcM = vc->matrices->FcM; fM2 = vc->matrices->fM2; hc = vc->hc; hard_constraints = hc->mx; sc = vc->sc; best_energy = best_attainable_energy(vc, state); /* .. 
on remaining intervals */ env->nopush = true; if ((i > 1) && (!array_flag)) vrna_message_error("Error while backtracking!"); if (j < i + turn + 1 && ON_SAME_STRAND(i, j, cp)) { /* minimal structure element */ if (array_flag == 0) /* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */ state->partial_energy += f5[j]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } ij = indx[j] + i; /* 13131313131313131313131313131313131313131313131313131313131313131313131 */ if (array_flag == 3 || array_flag == 1) { /* array_flag = 3: interval i,j was generated during */ /* a multiloop decomposition using array fM1 in repeat() */ /* or in this block */ /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ if ((hc->up_ml[j]) && (((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) || (fML[indx[j - 1] + i] != INF))) { if (array_flag == 3) fi = fM1[indx[j - 1] + i] + P->MLbase; else fi = fML[indx[j - 1] + i] + P->MLbase; if (sc) { if (sc->energy_up) fi += sc->energy_up[j][1]; if (sc->f) fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data); } if ((fi + best_energy <= threshold) && (ON_SAME_STRAND(j - 1, j, cp))) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, P->MLbase, array_flag, env); } hc_decompose = hard_constraints[length * i + j]; if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) { /* i,j may pair */ cij = c[ij]; if (cij != INF) { type = vrna_get_ptype(ij, ptype); switch (dangle_model) { case 0: element_energy = E_MLstem(type, -1, -1, P); break; default: element_energy = E_MLstem(type, (((i > 1) && (ON_SAME_STRAND(i - 1, i, cp))) || circular) ? S1[i - 1] : -1, (((j < length) && (ON_SAME_STRAND(j, j + 1, cp))) || circular) ? 
S1[j + 1] : -1, P); break; } if (sc) { if (sc->f) element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data); } cij += element_energy; if (cij + best_energy <= threshold) repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } else if ((with_gquad) && (ggg[ij] != INF)) { element_energy = E_MLstem(0, -1, -1, P); cij = ggg[ij] + element_energy; if (cij + best_energy <= threshold) repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 3 || array_flag == 1 */ /* 11111111111111111111111111111111111111111111111111111111111111111111111 */ if (array_flag == 1) { /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ int stopp, k1j; if ((ON_SAME_STRAND(i - 1, i, cp)) && (ON_SAME_STRAND(j, j + 1, cp))) { /*backtrack in FML only if multiloop is possible*/ for (k = i + turn + 1; k <= j - 1 - turn; k++) { /* Multiloop decomposition if i,j contains more than 1 stack */ if ((with_gquad) && (ON_SAME_STRAND(k, k + 1, cp)) && (fML[indx[k] + i] != INF) && (ggg[indx[j] + k + 1] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat_gquad(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } k1j = indx[j] + k + 1; if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (fML[indx[k] + i] != INF) && (c[k1j] != INF)) { short s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (ON_SAME_STRAND(i - 1, i, cp)) ? S1[k] : -1; s3 = (ON_SAME_STRAND(j, j + 1, cp)) ? 
S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data); } if (ON_SAME_STRAND(k, k + 1, cp)) { if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } } } } stopp = (cp > 0) ? (cp - 2) : (length); /*if cp -1: k on cut, => no ml*/ stopp = MIN2(stopp, j - 1 - turn); if (i > cp) stopp = j - 1 - turn; else if (i == cp) stopp = 0; /*not a multi loop*/ int up = 1; for (k = i; k <= stopp; k++, up++) { if (hc->up_ml[i] >= up) { k1j = indx[j] + k + 1; /* Multiloop decomposition if i,j contains only 1 stack */ if ((with_gquad) && (ggg[k1j] != INF)) { element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up; if (sc) if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (ggg[k1j] + element_energy + best_energy <= threshold) repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (c[k1j] != INF)) { int s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (ON_SAME_STRAND(k - 1, k, cp)) ? S1[k] : -1; s3 = (ON_SAME_STRAND(j, j + 1, cp)) ? 
S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); element_energy += P->MLbase * up; if (sc) { if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (sc->f) element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data); } if (c[k1j] + element_energy + best_energy <= threshold) repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } } } } /* array_flag == 1 */ /* 22222222222222222222222222222222222222222222222222 */ /* */ /* array_flag = 2: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* */ /* 22222222222222222222222222222222222222222222222222 */ if (array_flag == 2) { repeat(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) if (!noLP) vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j); return; } /* 00000000000000000000000000000000000000000000000000 */ /* */ /* array_flag = 0: interval i,j was found while */ /* tracing back through f5-array and c-array */ /* or within this block */ /* */ /* 00000000000000000000000000000000000000000000000000 */ if ((array_flag == 0) && !circular) { int s5, s3, kj, tmp_en; if ((hc->up_ext[j]) && (f5[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (f5[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 0, env); } for (k = j - turn - 1; k > 1; k--) { kj = indx[j] + k; if ((with_gquad) && (ON_SAME_STRAND(k, j, cp)) && (f5[k - 1] != INF) && (ggg[kj] != INF)) { element_energy = 0; if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; /* backtrace the quadruplex */ repeat_gquad(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & 
VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (f5[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); /* k and j pair */ switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (ON_SAME_STRAND(k - 1, k, cp)) ? S1[k - 1] : -1; s3 = ((j < length) && (ON_SAME_STRAND(j, j + 1, cp))) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (!(ON_SAME_STRAND(k, j, cp))) /*&&(state->is_duplex==0))*/ element_energy += P->DuplexInit; /*state->is_duplex=1;*/ if (sc) { if (sc->f) element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + 1; if ((with_gquad) && (ON_SAME_STRAND(k, j, cp)) && (ggg[kj] != INF)) { element_energy = 0; if (ggg[kj] + element_energy + best_energy <= threshold) /* backtrace the quadruplex */ repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) && (ON_SAME_STRAND(j, j + 1, cp)) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (!(ON_SAME_STRAND(1, j, cp))) element_energy += P->DuplexInit; if (sc) { if (sc->f) element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } } /* end array_flag == 0 && !circular*/ /* or do we subopt circular? 
*/ else if (array_flag == 0) { int k, l, p, q, tmp_en; /* if we've done everything right, we will never reach this case more than once */ /* right after the initilization of the stack with ([1,n], empty, 0) */ /* lets check, if we can have an open chain without breaking the threshold */ /* this is an ugly work-arround cause in case of an open chain we do not have to */ /* backtrack anything further... */ if (hc->up_ext[1] >= length) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[1][length]; if (sc->f) tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data); } if (tmp_en <= threshold) { new_state = derive_new_state(1, 2, state, 0, 0); new_state->partial_energy = 0; push(env->Stack, new_state); env->nopush = false; } } /* ok, lets check if we can do an exterior hairpin without breaking the threshold */ /* best energy should be 0 if we are here */ if (FcH + best_energy <= threshold) { /* lets search for all exterior hairpin cases, that fit into our threshold barrier */ /* we use index k,l to avoid confusion with i,j index of our state... */ /* if we reach here, i should be 1 and j should be n respectively */ for (k = i; k < j; k++) { if (hc->up_hp[1] < k) break; for (l = j; l >= k + turn + 1; l--) { int kl, tmpE; kl = indx[l] + k; if (c[kl] != INF) { tmpE = vrna_E_hp_loop(vc, l, k); if (c[kl] + tmpE + best_energy <= threshold) { /* what we really have to do is something like this, isn't it? */ /* we have to create a new state, with interval [k,l], then we */ /* add our loop energy as initial energy of this state and put */ /* the state onto the stack R... for further refinement... 
*/ /* we also denote this new interval to be scanned in C */ fork_state(k, l, state, tmpE, 2, env); } } } } } /* now lets see, if we can do an exterior interior loop without breaking the threshold */ if (FcI + best_energy <= threshold) { /* now we search for our exterior interior loop possibilities */ for (k = i; k < j; k++) { for (l = j; l >= k + turn + 1; l--) { int kl, type, tmpE; kl = indx[l] + k; /* just confusing these indices ;-) */ if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[kl] != INF)) { type = rtype[vrna_get_ptype(kl, ptype)]; for (p = l + 1; p < j; p++) { int u1, qmin; u1 = p - l - 1; if (u1 + k - 1 > MAXLOOP) break; if (hc->up_int[l + 1] < u1) break; qmin = u1 + k - 1 + j - MAXLOOP; if (qmin < p + turn + 1) qmin = p + turn + 1; for (q = j; q >= qmin; q--) { int u2, type_2; if (hc->up_int[q + 1] < (j - q + k - 1)) break; if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[indx[q] + p] != INF)) { type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)]; u2 = k - 1 + j - q; if (u1 + u2 > MAXLOOP) continue; tmpE = E_IntLoop(u1, u2, type, type_2, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], P); if (sc) { if (sc->energy_up) tmpE += sc->energy_up[l + 1][p - l - 1] + sc->energy_up[q + 1][j - q] + sc->energy_up[1][k - 1]; if (sc->energy_stack) { if (u1 + u2 == 0) { tmpE += sc->energy_stack[k] + sc->energy_stack[l] + sc->energy_stack[p] + sc->energy_stack[q]; } } } if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) { /* ok, similar to the hairpin stuff, we add new states onto the stack R */ /* but in contrast to the hairpin decomposition, we have to add two new */ /* intervals, enclosed by k,l and p,q respectively and we also have to */ /* add the partial energy, that comes from the exterior interior loop */ fork_two_states(k, l, p, q, state, tmpE, 2, 2, env); } } } } } } } } /* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */ if (FcM <= 
threshold) { /* this decomposition will be somehow more complicated...so lets see what we do here... */ /* first we want to find out which split inidices we can use without exceeding the threshold */ int tmpE2; for (k = turn + 1; k < j - 2 * turn; k++) { if ((fML[indx[k] + 1] != INF) && (fM2[k + 1] != INF)) { tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing; if (tmpE2 + best_energy <= threshold) { /* grmpfh, we have found a possible split index k so we have to split fM2 and fML now */ /* lets do it first in fM2 anyway */ for (l = k + turn + 2; l < j - turn - 1; l++) { tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1]; if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) { /* we've (hopefully) found a valid decomposition of fM2 and therefor we have all */ /* three intervals for our new state to be pushed on stack R */ new_state = copy_state(state); /* first interval leads for search in fML array */ new_interval = make_interval(1, k, 1); push(new_state->Intervals, new_interval); env->nopush = false; /* next, we have the first interval that has to be traced in fM1 */ new_interval = make_interval(k + 1, l, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* and the last of our three intervals is also one to be traced within fM1 array... */ new_interval = make_interval(l + 1, j, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* mmh, we add the energy for closing the multiloop now... */ new_state->partial_energy += P->MLclosing; /* next we push our state onto the R stack */ push(env->Stack, new_state); env->nopush = false; } /* else we search further... */ } /* ok, we have to decompose fML now... */ } } } } } /* thats all folks for the circular case... 
*/ /* 44444444444444444444444444444444444444444444444444 */ /* */ /* array_flag = 4: interval i,j was found while */ /* tracing back through fc-array smaller than than cp */ /* or within this block */ /* */ /* 44444444444444444444444444444444444444444444444444 */ if (array_flag == 4) { int ik, s5, s3, tmp_en; if ((hc->up_ext[i]) && (fc[i + 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[i][1]; if (sc->f) tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[i + 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 5'-end */ fork_state(i + 1, j, state, tmp_en, 4, env); } for (k = i + TURN + 1; k < j; k++) { ik = indx[k] + i; if ((with_gquad) && (fc[k + 1] != INF) && (ggg[ik] != INF)) { if (fc[k + 1] + ggg[ik] + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k + 1] != INF) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (i > 1) ? 
S1[i - 1] : -1; s3 = S1[k + 1]; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data); } if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } } ik = indx[cp - 1] + i; /* indx[j] + i; */ if ((with_gquad) && (ggg[ik] != INF)) if (ggg[ik] + best_energy <= threshold) repeat_gquad(vc, i, cp - 1, state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * i + cp - 1] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); s3 = -1; switch (dangle_model) { case 0: s5 = -1; break; default: s5 = (i > 1) ? S1[i - 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, cp - 1, i, cp - 1, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[ik] + element_energy + best_energy <= threshold) repeat(vc, i, cp - 1, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 4 */ /* 55555555555555555555555555555555555555555555555555 */ /* */ /* array_flag = 5: interval cp=i,j was found while */ /* tracing back through fc-array greater than cp */ /* or within this block */ /* */ /* 55555555555555555555555555555555555555555555555555 */ if (array_flag == 5) { int kj, s5, s3, tmp_en; if ((hc->up_ext[j]) && (fc[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 5, env); } for (k = j - TURN - 1; k > i; k--) { kj = indx[j] + k; if ((with_gquad) && (fc[k - 1] != INF) && (ggg[kj] != INF)) { if (fc[k - 1] + ggg[kj] + best_energy <= 
threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); element_energy = 0; switch (dangle_model) { case 0: s3 = s5 = -1; break; default: s5 = S1[k - 1]; s3 = (j < length) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + cp; /* indx[j] + i; */ if ((with_gquad) && (ggg[kj] != INF)) if (ggg[kj] + best_energy <= threshold) repeat_gquad(vc, cp, j, state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * cp + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) ? 
S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(cp, j, cp, j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, cp, j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 5 */ if (array_flag == 6) { /* we have a gquad */ repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j); return; } if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } /*---------------------------------------------------------------------------*/ PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { int *ggg, *indx, element_energy, cp; short *S1; vrna_param_t *P; indx = vc->jindx; cp = vc->cutpoint; ggg = vc->matrices->ggg; S1 = vc->sequence_encoding; P = vc->params; /* find all gquads that fit into the energy range and the interval [i,j] */ STATE *new_state; best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (ON_SAME_STRAND(i, j, cp)) { element_energy = ggg[indx[j] + i]; if ((element_energy != INF) && (element_energy + best_energy <= threshold)) { int cnt; int *L; int *l; /* find out how many gquads we might expect in the interval [i,j] */ int num_gquads = get_gquad_count(S1, i, j); num_gquads++; L = (int *)vrna_alloc(sizeof(int) * num_gquads); l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3); L[0] = -1; get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy); for (cnt = 0; L[cnt] != -1; cnt++) { new_state = copy_state(state); make_gquad(i, L[cnt], &(l[3 * cnt]), new_state); new_state->partial_energy += part_energy; new_state->partial_energy += element_energy; /* new_state->best_energy = * hairpin[unpaired] 
+ element_energy + best_energy; */ push(env->Stack, new_state); env->nopush = false; } free(L); free(l); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { /* routine to find stacks, bulges, internal loops and multiloops */ /* within interval closed by basepair i,j */ STATE *new_state; vrna_param_t *P; vrna_md_t *md; register int ij, k, p, q, energy, new; register int mm; register int no_close, type, type_2; char *ptype; unsigned int n; int element_energy; int *fc, *c, *fML, *fM1, *ggg; int rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn, cp; short *S1; vrna_hc_t *hc; vrna_sc_t *sc; n = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; indx = vc->jindx; cp = vc->cutpoint; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); noGUclosure = md->noGUclosure; noLP = md->noLP; with_gquad = md->gquad; dangle_model = md->dangles; turn = md->min_loop_size; fc = vc->matrices->fc; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; hc = vc->hc; sc = vc->sc; ij = indx[j] + i; type = vrna_get_ptype(ij, ptype); /* * if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j); */ no_close = (((type == 3) || (type == 4)) && noGUclosure); if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { if (noLP) { /* always consider the structure with additional stack */ if (i + turn + 2 < j) { if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) { type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)]; energy = 0; if (ON_SAME_STRAND(i, i + 1, cp) && ON_SAME_STRAND(j - 1, j, cp)) { energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P); if (sc) { if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { energy += sc->energy_stack[i] + 
sc->energy_stack[i + 1] + sc->energy_stack[j - 1] + sc->energy_stack[j]; } if (sc->f) energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_IL, sc->data); } new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2); make_pair(i, j, new_state); make_pair(i + 1, j - 1, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')') /* adding a stack is the only possible structure */ return; } } } } } best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) { int minq = j - i + p - MAXLOOP - 2; if (minq < p + 1 + turn) minq = p + 1 + turn; if (hc->up_int[i + 1] < (p - i - 1)) break; for (q = j - 1; q >= minq; q--) { if (hc->up_int[q + 1] < (j - q - 1)) break; /* skip stack if noLP, since we've already processed it above */ if ((noLP) && (p == i + 1) && (q == j - 1)) continue; if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC)) continue; if (c[indx[q] + p] == INF) continue; type_2 = vrna_get_ptype(indx[q] + p, ptype); if (noGUclosure) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ if (ON_SAME_STRAND(i, p, cp) && ON_SAME_STRAND(q, j, cp)) { energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2], S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); new = energy + c[indx[q] + p]; if (sc) { if (sc->energy_up) energy += sc->energy_up[i + 1][p - i - 1] + sc->energy_up[q + 1][j - q - 1]; if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { if ((p == i + 1) && (q == j - 1)) { energy += sc->energy_stack[i] + sc->energy_stack[p] + sc->energy_stack[q] + sc->energy_stack[j]; } } if (sc->f) energy += sc->f(i, j, p, q, VRNA_DECOMP_PAIR_IL, 
sc->data); } new = energy + c[indx[q] + p]; if (new + best_energy <= threshold) /* stack, bulge, or interior loop */ fork_int_state(i, j, p, q, state, part_energy + energy, env); } /*end of if block */ } /* end of q-loop */ } /* end of p-loop */ } if (!ON_SAME_STRAND(i, j, cp)) { /*look in fc*/ if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[i + 1] != INF) && (fc[j - 1] != INF)) { rt = rtype[type]; element_energy = 0; switch (dangle_model) { case 0: element_energy = vrna_E_ext_stem(rt, -1, -1, P); break; default: element_energy = vrna_E_ext_stem(rt, (ON_SAME_STRAND(j - 1, j, cp)) ? S1[j - 1] : -1, (ON_SAME_STRAND(i, i + 1, cp)) ? S1[i + 1] : -1, P); break; } if (fc[i + 1] + fc[j - 1] + element_energy + best_energy <= threshold) fork_two_states_pair(i, j, cp, state, part_energy + element_energy, 4, 5, env); } } mm = P->MLclosing; rt = rtype[type]; if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) && (i != cp - 1) && (j != cp)) { element_energy = mm; switch (dangle_model) { case 0: element_energy = E_MLstem(rt, -1, -1, P) + mm; break; default: element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm; break; } if (sc) { if (sc->energy_bp) element_energy += sc->energy_bp[ij]; if (sc->f) element_energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, sc->data); } /* multiloop decomposition */ if ((sc) && (sc->f)) { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { eee += fM1[indx[j - 1] + k] + best_energy; int aux_eee = element_energy + sc->f(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, sc->data); if ((eee + aux_eee) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env); } } } else { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { /* multiloop decomposition */ if ((eee + fM1[indx[j - 1] + k] + element_energy + best_energy) <= 
threshold) fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env); } } } } if (ON_SAME_STRAND(i, j, cp)) { if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) && (!no_close)) { element_energy = vrna_E_hp_loop(vc, i, j); if (element_energy != INF) { if (element_energy + best_energy <= threshold) /* hairpin structure */ fork_state_pair(i, j, state, part_energy + element_energy, env); } } if (with_gquad) { /* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */ int cnt, *p, *q, *en, tmp_en; p = q = en = NULL; en = E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P); for (cnt = 0; p[cnt] != -1; cnt++) { if ((hc->up_int[i + 1] >= p[cnt] - i - 1) && (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) { tmp_en = en[cnt]; if (sc) { if (sc->energy_bp) tmp_en += sc->energy_bp[ij]; if (sc->energy_up) tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1] + sc->energy_up[q[cnt] + 1][j - q[cnt] - 1]; } new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6); make_pair(i, j, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; } } free(en); free(p); free(q); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void old_subopt_print(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; if (structure && d->fp) { char *e_string = vrna_strdup_printf(" %6.2f", energy); print_structure(d->fp, structure, e_string); free(e_string); } } PRIVATE void old_subopt_store(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; d->SolutionList[d->n_sol++].structure = 
strdup(structure); } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PUBLIC SOLUTION * subopt(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp); } PUBLIC SOLUTION * subopt_circ(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp); } PUBLIC SOLUTION * subopt_par(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp); } PRIVATE SOLUTION * wrap_subopt(char *string, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { vrna_fold_compound_t *vc; vrna_param_t *P; char *seq; #ifdef _OPENMP /* Explicitly turn off dynamic threads */ omp_set_dynamic(0); #endif /* we need the parameter structure for hard constraints */ if (parameters) { P = vrna_params_copy(parameters); } else { vrna_md_t md; set_model_details(&md); md.temperature = temperature; P = vrna_params(&md); } P->model_details.circ = is_circular; P->model_details.uniq_ML = uniq_ML = 1; /* what about cofold sequences here? Is it safe to call the below cut_point_insert() ? */ /* dirty hack to reinsert the '&' according to the global variable 'cut_point' */ seq = vrna_cut_point_insert(string, cut_point); vc = vrna_fold_compound(seq, &(P->model_details), ((is_circular == 0) ? 
VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT)); if (parameters) { /* replace params if necessary */ free(vc->params); vc->params = P; } else { free(P); } /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */ if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK | VRNA_CONSTRAINT_DB_INTRAMOL | VRNA_CONSTRAINT_DB_INTERMOL; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; /* cleanup */ free(seq); return vrna_subopt(vc, delta, subopt_sorted, fp); } #endif /*---------------------------------------------------------------------------*/ /* Well, that is the end!----------------------------------------------------*/ /*---------------------------------------------------------------------------*/
/* ===== transpose.h — beginning of a separate, concatenated file ===== */
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_KERNELS_TRANSPOSE_H_
#define MACE_KERNELS_TRANSPOSE_H_

#if defined(MACE_ENABLE_NEON)
#include <arm_neon.h>
#endif
#include <vector>
#include <algorithm>

#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/public/mace.h"
#include "mace/utils/utils.h"

namespace mace {
namespace kernels {

// Fast path for a single image: NHWC -> NCHW when C == 3 (e.g. RGB input).
// 'input' is H*W*3 interleaved floats; 'output' receives three H*W planes.
// Rows are processed in parallel; on NEON, vld3q_f32 de-interleaves four
// 3-channel pixels per iteration into one register per channel.
static void TransposeNHWCToNCHWC3(const float *input,
                                  float *output,
                                  const index_t height,
                                  const index_t width) {
  index_t image_size = height * width;

#pragma omp parallel for
  for (index_t h = 0; h < height; ++h) {
    index_t in_offset = h * width * 3;
    index_t out_offset = h * width;

#if defined(MACE_ENABLE_NEON)
    index_t w;
    // Vectorized body: 4 pixels (12 floats) per step.
    for (w = 0; w + 3 < width; w += 4) {
      float32x4x3_t vi = vld3q_f32(input + in_offset);
      vst1q_f32(output + out_offset, vi.val[0]);
      vst1q_f32(output + out_offset + image_size, vi.val[1]);
      vst1q_f32(output + out_offset + image_size * 2, vi.val[2]);

      in_offset += 12;
      out_offset += 4;
    }
    // Scalar tail for width % 4 leftovers (uses absolute indices, so the
    // advanced in_offset/out_offset are intentionally not reused here).
    for (; w < width; ++w) {
      for (index_t c = 0; c < 3; ++c) {
        output[h * width + image_size * c + w] =
          input[h * width * 3 + w * 3 + c];
      }
    }
#else
    for (index_t w = 0; w < width; ++w) {
      for (index_t c = 0; c < 3; ++c) {
        output[out_offset + c * image_size + w] = input[in_offset + w * 3 + c];
      }
    }
#endif
  }
}

// Fast path for a single image: NCHW -> NHWC when C == 2.
// 'input' holds two H*W planes; 'output' receives H*W*2 interleaved floats.
// On NEON, vst2q_f32 interleaves four values from each plane per step.
static void TransposeNCHWToNHWCC2(const float *input,
                                  float *output,
                                  const index_t height,
                                  const index_t width) {
  index_t image_size = height * width;

#pragma omp parallel for
  for (index_t h = 0; h < height; ++h) {
    index_t in_offset = h * width;
    index_t out_offset = h * width * 2;

#if defined(MACE_ENABLE_NEON)
    index_t w;
    for (w = 0; w + 3 < width; w += 4) {
      float32x4_t vi0 = vld1q_f32(input + in_offset);
      float32x4_t vi1 = vld1q_f32(input + in_offset + image_size);
      float32x4x2_t vi = {vi0, vi1};
      vst2q_f32(output + out_offset, vi);

      in_offset += 4;
      out_offset += 8;
    }
    // Scalar tail (absolute indexing, as in TransposeNHWCToNCHWC3).
    for (; w < width; ++w) {
      for (index_t c = 0; c < 2; ++c) {
        output[h * width * 2 + w * 2 + c] =
          input[h * width + image_size * c + w];
      }
    }
#else
    for (index_t w = 0; w < width; ++w) {
      for (index_t c = 0; c < 2; ++c) {
        output[out_offset + w * 2 + c] = input[in_offset + c * image_size + w];
      }
    }
#endif
  }
}

// CPU transpose kernel. 'dims' is the permutation: output axis i takes
// input axis dims[i]. Supports 2-D (only the {1, 0} swap) and 4-D tensors;
// 4-D has two specialized fast paths (NHWC->NCHW with C==3 and
// NCHW->NHWC with C==2) and a generic strided fallback.
template<DeviceType D, typename T>
struct TransposeFunctor : OpKernel {
  TransposeFunctor(OpKernelContext *context, const std::vector<int> &dims)
    : OpKernel(context), dims_(dims) {}

  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard output_guard(output);
    const std::vector<index_t> &input_shape = input->shape();
    const std::vector<index_t> &output_shape = output->shape();
    const T *input_data = input->data<T>();
    T *output_data = output->mutable_data<T>();

    if (input->dim_size() == 2) {
      // 2-D case: only a plain matrix transpose is supported.
      MACE_CHECK(dims_[0] == 1 && dims_[1] == 0, "no need transform");
      index_t stride_i = input_shape[0];
      index_t stride_j = input_shape[1];
      // Cache-blocked transpose; larger tiles for larger matrices.
      index_t tile_size = input_shape[0] > 512 || input_shape[1] > 512 ?
                          64 : 32;
#pragma omp parallel for collapse(2)
      for (index_t i = 0; i < input_shape[0]; i += tile_size) {
        for (index_t j = 0; j < input_shape[1]; j += tile_size) {
          index_t end_i = std::min(i + tile_size, input_shape[0]);
          index_t end_j = std::min(j + tile_size, input_shape[1]);
          for (index_t tile_i = i; tile_i < end_i; ++tile_i) {
            for (index_t tile_j = j; tile_j < end_j; ++tile_j) {
              // output is [shape1, shape0], so its row stride is stride_i.
              output_data[tile_j * stride_i + tile_i] =
                input_data[tile_i * stride_j + tile_j];
            }
          }
        }
      }
    } else if (input->dim_size() == 4) {
      std::vector<int> transpose_order_from_NHWC_to_NCHW{0, 3, 1, 2};
      std::vector<int> transpose_order_from_NCHW_to_NHWC{0, 2, 3, 1};
      // Elements per batch item; identical for input and output since a
      // transpose only permutes axes.
      index_t batch_size = input->dim(1) * input->dim(2) * input->dim(3);
      if (dims_ == transpose_order_from_NHWC_to_NCHW && input->dim(3) == 3) {
        for (index_t b = 0; b < input->dim(0); ++b) {
          TransposeNHWCToNCHWC3(input_data + b * batch_size,
                                output_data + b * batch_size,
                                input->dim(1),
                                input->dim(2));
        }
      } else if (dims_ == transpose_order_from_NCHW_to_NHWC
        && input->dim(1) == 2) {
        for (index_t b = 0; b < input->dim(0); ++b) {
          TransposeNCHWToNHWCC2(input_data + b * batch_size,
                                output_data + b * batch_size,
                                input->dim(2),
                                input->dim(3));
        }
      } else {
        // Generic permutation: walk the output in order, gather from the
        // permuted input index (idim[dims_[k]] = odim[k]).
        std::vector<index_t>
          in_stride{input_shape[1] * input_shape[2] * input_shape[3],
                    input_shape[2] * input_shape[3], input_shape[3], 1};
        std::vector<index_t>
          out_stride{output_shape[1] * output_shape[2] * output_shape[3],
                     output_shape[2] * output_shape[3], output_shape[3], 1};

        std::vector<index_t> idim(4, 0);
        std::vector<index_t> odim(4, 0);
        for (odim[0] = 0; odim[0] < output_shape[0]; ++odim[0]) {
          for (odim[1] = 0; odim[1] < output_shape[1]; ++odim[1]) {
            for (odim[2] = 0; odim[2] < output_shape[2]; ++odim[2]) {
              for (odim[3] = 0; odim[3] < output_shape[3]; ++odim[3]) {
                idim[dims_[0]] = odim[0];
                idim[dims_[1]] = odim[1];
                idim[dims_[2]] = odim[2];
                idim[dims_[3]] = odim[3];

                // Innermost strides are 1, hence the bare odim[3]/idim[3].
                output_data[odim[0] * out_stride[0] + odim[1] * out_stride[1]
                  + odim[2] * out_stride[2] + odim[3]] =
                  input_data[idim[0] * in_stride[0] + idim[1] * in_stride[1]
                    + idim[2] * in_stride[2] + idim[3]];
              }
            }
          }
        }
      }
    } else {
      // Ranks other than 2 and 4 are not supported.
      MACE_NOT_IMPLEMENTED;
    }

    return MACE_SUCCESS;
  }

  std::vector<int> dims_;
};

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_TRANSPOSE_H_
/* ===== move_shallow_water_particle_utility.h ===== */
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // Pablo Becker // #ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED #define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED ///@defgroup MoveShallowWaterParticleUtility ///@brief Utility to move particles on the eulerian mesh with an /// explicit scheme. This is the basic tool of the pfem2 framework // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" #include "includes/checks.h" #include "includes/dof.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" #include "includes/global_pointer_variables.h" #include "processes/node_erase_process.h" #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "shallow_water_application_variables.h" #include "shallow_water_particle.h" #include "utilities/openmp_utils.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveShallowWaterParticleUtility { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; typedef typename Configure::ContainerType ContainerType; typedef 
typename Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector; KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility); //template<unsigned int TDim> MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) : mrModelPart(rModelPart), mScalarVar1(&KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ), mVectorVar1(&KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) ) { KRATOS_TRY std::cout << "Initializing moveparticle utility for scalar transport" << std::endl; Parameters default_parameters( R"( { "convection_scalar_variable" : "HEIGHT", "convection_vector_variable" : "VELOCITY", "maximum_number_of_particles" : 16 } )" ); // Now validate agains defaults -- this also ensures no type mismatch rParameters.ValidateAndAssignDefaults(default_parameters); m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString(); m_vector_var1_name = rParameters["convection_vector_variable"].GetString(); mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble(); Check(); //storing water and air density and their inverses, just in case it is needed for the streamline integration //loop in elements to change their ID to their position in the array. Easier to get information later. //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!! ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ielem->SetId(ii+1); } mLastElemId= (mrModelPart.ElementsEnd()-1)->Id(); int node_id=0; // we look for the smallest edge. 
could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used) ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin(); std::vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator pnode = inodebegin+ii; array_1d<double,3> position_node; double distance=0.0; position_node = pnode->Coordinates(); GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES); //we loop all the nodes to check all the edges const double number_of_neighbours = static_cast<double>(rneigh.size()); for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++) { array_1d<double,3> position_difference; position_difference = inode->Coordinates() - position_node; const double current_distance = norm_2( position_difference ); distance += current_distance / number_of_neighbours; } //and we save the largest edge. pnode->SetValue(MEAN_SIZE, distance); node_id=pnode->GetId(); } } mLastNodeId=node_id; //we also calculate the element mean size in the same way, for the courant number //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition); //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element. 
#pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; double elem_size; array_1d<double,3> Edge(3,0.0); Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates(); elem_size = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) elem_size += Edge[d]*Edge[d]; for (unsigned int i = 2; i < (TDim+1); i++) for(unsigned int j = 0; j < i; j++) { Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates(); double Length = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) Length += Edge[d]*Edge[d]; if (Length < elem_size) elem_size = Length; } elem_size = sqrt(elem_size); ielem->SetValue(MEAN_SIZE, elem_size); } } //matrix containing the position of the 4/15/45 particles that we will seed at the beggining BoundedMatrix<double, 5*(1+TDim), 3 > pos; BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N; int particle_id=0; mNElems = mrModelPart.Elements().size(); std::cout << " about to resize vectors" << std::endl; //setting the right size to the vector containing the particles assigned to each element //particles vector. this vector contains ALL the particles in the simulation. mParticlesVector.resize(mNElems*mMaxNumberOfParticles); //and this vector contains the current number of particles that are in each element (currently zero) mNumOfParticlesInElems.resize(mNElems); mNumOfParticlesInElems=ZeroVector(mNElems); //when moving the particles, an auxiliary vector is necessary (to store the previous number) mNumOfParticlesInElemsAux.resize(mNElems); //each element will have a list of pointers to all the particles that are inside. //this vector contains the pointers to the vector of (particle) pointers of each element. mVectorOfParticlePointersVectors.resize(mNElems); //int artz; //std::cin >> artz; int i_int=0; //careful! 
it's not the id, but the position inside the array! std::cout << " about to create particles" << std::endl; //now we seed: LOOP IN ELEMENTS //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one mOffset=0; //ShallowParticle& firstparticle = mParticlesVector[0]; for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle ); //ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //now we link the mpointers_to_particle_pointers_vectors to the corresponding element //mpointers_to_particle_pointers_vectors(ii) = &particle_pointers; //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half). 
//for(int j=0; j<(mMaxNumberOfParticles*2); j++) // particle_pointers.push_back(&firstparticle); mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 ); ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii]; //int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles = mNumOfParticlesInElems[ii]; number_of_particles=0; Geometry< Node<3> >& geom = ielem->GetGeometry(); //unsigned int elem_id = ielem->Id(); ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45 //now we seed the particles in the current element for (unsigned int j = 0; j < pos.size1(); j++) { ++particle_id; ShallowParticle& pparticle = mParticlesVector[particle_id-1]; //~ pparticle.X()=pos(j,0); //~ pparticle.Y()=pos(j,1); //~ pparticle.Z()=pos(j,2); pparticle.Coordinates() = row(pos,j); pparticle.GetEraseFlag()=false; array_1d<float, 3 > & vector1 = pparticle.GetVector1(); float & scalar1 = pparticle.GetScalar1(); noalias(vector1) = ZeroVector(3); scalar1=0.0; for (unsigned int k = 0; k < (TDim+1); k++) { scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(*mScalarVar1); noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(*mVectorVar1); } particle_pointers(j) = &pparticle; number_of_particles++ ; } ++i_int; } mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true. 
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl; mParticlePrintingToolInitialized=false; KRATOS_CATCH("") } ~MoveShallowWaterParticleUtility() {} void MountBin() { KRATOS_TRY //copy the elements to a new container, as the list will //be shuffled duringthe construction of the tree ContainerType& rElements = mrModelPart.ElementsArray(); IteratorType it_begin = rElements.begin(); IteratorType it_end = rElements.end(); //const int number_of_elem = rElements.size(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) ); paux.swap(mpBinsObjectDynamic); //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end ); std::cout << " finished mounting Bins" << std::endl; KRATOS_CATCH("") } /// Calculates the mean velocity /** This function computes the mean velocity within an element and * stores it in MEAN_VEL_OVER_ELEM_SIZE variable. * This variable keeps the courant number aprox 0.1 in each substep * * @see MoveParticle * @see MoveParticleInverseWay */ void CalculateVelOverElemSize() { KRATOS_TRY const double nodal_weight = 1.0/ (1.0 + double (TDim) ); ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); std::vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<double, 3 >vector_mean_velocity=ZeroVector(3); for (unsigned int i=0; i != (TDim+1) ; i++) vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY); 
vector_mean_velocity *= nodal_weight; //~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) ); const double mean_velocity = norm_2( vector_mean_velocity ); ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) ); } } KRATOS_CATCH("") } /// Reset the boundary conditions /** When a variable is fixed this function resets the nodal values * with the previous time step */ void ResetBoundaryConditions() { KRATOS_TRY typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > component_type; component_type vector_var_x = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_X")); component_type vector_var_y = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Y")); component_type vector_var_z = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Z")); ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin(); std::vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(*mScalarVar1)) { inode->FastGetSolutionStepValue(*mScalarVar1)=inode->GetSolutionStepValue(*mScalarVar1,1); } if (inode->IsFixed(vector_var_x)) { inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1); } if (inode->IsFixed(vector_var_y)) { inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1); } if (inode->IsFixed(vector_var_z)) { inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1); } } } 
    KRATOS_CATCH("")
    }

    /// Auxiliary function to compute the "delta variables"
    /** Delta variables are the difference between two time steps.
     * It's value is used to update particles info
     *
     * Writes DELTA_SCALAR1 = scalar_var - PROJECTED_SCALAR1 and
     * DELTA_VECTOR1 = vector_var - PROJECTED_VECTOR1 on every node,
     * in parallel over OpenMP node partitions.
     *
     * @see CorrectParticlesWithoutMovingUsingDeltaVariables
     */
    void CalculateDeltaVariables()
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();

        // Partition the node container evenly over the available threads.
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(*mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
                inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(*mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
            }
        }
        KRATOS_CATCH("")
    }

    /// Auxiliary function
    /** This function copy a scalar variable value to the previous time step
     * (buffer position 1), node by node, in parallel.
     */
    void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
                                         ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();

        // Same thread-partitioning scheme as above.
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                // Overwrite the step-1 (previous) value with the current one.
                inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
KRATOS_CATCH("") } /// Auxiliar function /** This function copy a vector variable value to the previous time step */ void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); std::vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } /// Move all the particles /** This function moves the particles across the streamlines * according to the velocity given by VELOCITY variable. The * movement is performed in nsubsteps, during a total time * of DELTA_TIME * * @see Moveparticle */ void MoveParticles() { KRATOS_TRY const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo(); const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part) //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other. 
bool even_timestep; if (offset!=0) even_timestep=false; else even_timestep=true; const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,TDim+1> N; const unsigned int max_results = 10000; //double integration_distance= 2.0; mMaxSubSteps = 10; mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps); std::vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition); ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element. #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { //ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii; int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES); mNumOfParticlesInElemsAux[ii] = number_of_particles; mNumOfParticlesInElems[ii] = 0; //we reset the local vectors for a faster access; } } std::cout << "convecting particles" << std::endl; //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle) #pragma omp barrier #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { ResultContainerType results(max_results); GlobalPointersVector< Element > elements_in_trajectory; elements_in_trajectory.resize(20); for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++) { ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem; 
const int old_element_id = old_element->Id(); ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1]; if ( (results.size()) != max_results ) results.resize(max_results); unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem) for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++) { ShallowParticle& pparticle = old_element_particle_pointers[offset+ii]; Element::Pointer pcurrent_element( *old_element.base() ); ResultIteratorType result_begin = results.begin(); bool & erase_flag=pparticle.GetEraseFlag(); if (erase_flag == false){ MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //saqué N de los argumentos, no lo necesito ya q empieza SIEMPRE en un nodo y no me importa donde termina const int current_element_id = pcurrent_element->Id(); int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1]; if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false) { ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1]; #pragma omp critical { if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!! { current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle; number_of_particles_in_current_elem++ ; KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) << "In move shallow water particle utility: exceeded maximum number of particles" << std::endl; //~ if (number_of_particles_in_current_elem > mMaxNumberOfParticles) //~ KRATOS_WATCH("MAL"); } else { pparticle.GetEraseFlag()=true; //so we just delete it! } } } else { pparticle.GetEraseFlag()=true; //so we just delete it! 
} } } } } // After having changed everything we change the status of the mOddTimeStep flag: mOffset = post_offset;; // KRATOS_CATCH("") } /// Transfer particles information to the mesh nodes /** This function explicitly projects data from particles (lagrangian) * onto the eulerian mesh. Shape functions of the elements determine * the particle location within the element and its contribution to * each node as a weighting function. */ void TransferLagrangianToEulerian() //explicit { KRATOS_TRY const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0); std::cout << "projecting info to mesh" << std::endl; const int offset = mOffset; // the array of pointers for each element has twice the required size so that // we use a part in odd timesteps and the other in even ones. //(flag managed only by MoveParticles) // We must project data from the particles (lagrangian) onto the eulerian mesh //int nnodes = mrModelPart.Nodes().size(); //array_1d<double,(n_nodes)> eulerian_nodes_sumweights; // We save data from previous time step of the eulerian mesh in case we must reuse it later // cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later! // after having saved data, we reset them to zero, this way it's easier to add the contribution // of the surrounding particles. 
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin(); std::vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0; inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3); inode->FastGetSolutionStepValue(YP)=0.0; } } // Adding contribution, loop on elements, since each element has stored the particles found inside of it std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition); ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; array_1d<double,3*(TDim+1)> nodes_positions; array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1)); array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1)); array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1)); //array_1d<double,(TDim+1)> weighting_inverse_divisor; Geometry<Node<3> >& geom = ielem->GetGeometry(); for (int i=0 ; i!=(TDim+1) ; ++i) { nodes_positions[i*3+0]=geom[i].X(); nodes_positions[i*3+1]=geom[i].Y(); nodes_positions[i*3+2]=geom[i].Z(); //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01); } int & number_of_particles_in_elem= mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; for 
(int iii=0; iii<number_of_particles_in_elem ; iii++ ) { if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop! break; ShallowParticle& pparticle = element_particle_pointers[offset+iii]; if (pparticle.GetEraseFlag()==false) { array_1d<double,3> & position = pparticle.Coordinates(); const float& particle_scalar1 = pparticle.GetScalar1(); const array_1d<float,3>& particle_vector1 = pparticle.GetVector1(); array_1d<double,TDim+1> N; bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N); if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element. { KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl; for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 && N[j]> -1e-5) N[j]=1e-10; } for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element { // These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions //double sq_dist = 0; //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k])); //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) ); double weight=N(j)*N(j); //weight=N(j)*N(j)*N(j); if (weight<threshold) weight=1e-10; nodes_added_weights[j] += weight; nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1); for (int k=0 ; k!=(TDim); k++) //x,y,(z) { nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]); } } } } for (int i=0 ; i!=(TDim+1) ; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2]; 
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i]; geom[i].UnSetLock(); } } } #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double sum_weights = inode->FastGetSolutionStepValue(YP); if (sum_weights>0.00001) { double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1); array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); scalar /=sum_weights; // resetting the scalar1 vector /=sum_weights; // resetting the vector1 } else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(*mScalarVar1,1); // Resetting the convected scalar inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(*mVectorVar1,1); // Resetting the convected vector } } } KRATOS_CATCH("") } /// Update all the particles without moving them /** This function updates all the particles variables using the * "delta variables" from the nodal database. * * @see CorrectParticleUsingDeltaVariables */ void CorrectParticlesWithoutMovingUsingDeltaVariables() { KRATOS_TRY const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
//(flag managed only by MoveParticles) ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); std::vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*ielem.base()); Geometry<Node<3> >& geom = ielem->GetGeometry(); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles_in_elem= mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { if (iii>mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop! break; ShallowParticle & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false) { CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper } } } } KRATOS_CATCH("") } /// Fill an element with particles /** This function is to be executed after moving particles and * before tranferring data from lagrangian particles to eulerian mesh * If an element finishes with less particles than "minimum number * of particles", then PreReseed adds particles inside it. * A minimal reseed is performed in order to not disturb the projection * from lagrangian to euelrian. 
*
* @see MinimumNumberOfParticles
*
* @see MoveParticles
* @see MoveParticleInverseWay: is called to get the particle values
*/
void PreReseed(int MinimumNumberOfParticles)
{
    KRATOS_TRY

    // Which half of the double-sized particle-pointer arrays is active this step.
    const int offset =mOffset;
    const int max_results = 1000;

    //tools for the parallelization
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows = mrModelPart.Elements().size();
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

    #pragma omp parallel firstprivate(elem_partition)
    {
        ResultContainerType results(max_results);
        int k = OpenMPUtils::ThisThread();
        //ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
        //ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
        //ModelPart::NodesContainerType local_list=aux[k];
        //PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
        BoundedMatrix<double, (TDim+1), 3 > pos;
        BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
        // Cursor into mParticlesVector used to locate erased (reusable) slots.
        // It only moves forward, so each thread scans the pool at most once.
        unsigned int freeparticle=0; //we start with the first position in the particles array
        //int local_id=1;
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            results.resize(max_results);
            //const int & elem_id = ielem->Id();
            //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];

            if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
            {
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                ComputeGaussPointPositionsForPreReseed(geom, pos, N);

                for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
                {
                    // Claim an erased slot in the shared particle pool. The flag is
                    // re-checked inside the critical section so two threads cannot
                    // claim the same slot.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }

                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    KRATOS_ERROR_IF_NOT( is_found ) << "In move shallow water particle utility: particle not found in domain" << std::endl;

                    pparticle.GetEraseFlag()=false;

                    // Get the particle's convected values by integrating backwards
                    // along the velocity field (see MoveParticleInverseWay).
                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() );
                    MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);

                    //and we copy it to the array:
                    mParticlesVector[freeparticle] = pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}

/// Fill an element with particles
/** This function is to be executed after the mesh stage solver is
 * called and the particles are updated.
 * If an element contains less particles than "minimum number of
 * particles", then PostReseed adds particles inside it.
* A full reseed is performed and the particle gets its convected
* variables directly from the eulerian mesh
*
* @param MinimumNumberOfParticles
*
* @see PreReseed
*/
void PostReseed(int MinimumNumberOfParticles) //pooyan's way
{
    KRATOS_TRY

    // Which half of the double-sized particle-pointer arrays is active this step.
    const int offset = mOffset;

    //TOOLS FOR THE PARALLELIZATION
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows=mrModelPart.Elements().size();
    //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemble everything together, renaming particles ids to get consecutive ids
    {
        unsigned int reused_particles=0;
        // Cursor into the shared particle pool used to find erased slots (thread-local).
        unsigned int freeparticle = 0; //we start by the first position;
        int k = OpenMPUtils::ThisThread();
        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
        double mesh_scalar1;
        array_1d<double,3> mesh_vector1;
        array_1d<int, (3+2*TDim) > positions;
        unsigned int number_of_reseeded_particles;

        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles = 0;
                //reseed_more=true;
                number_of_reseeded_particles = 3 + 2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);

                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    // Now we have to find an empty space (a particle that was about to be deleted) in the
                    // particles model part. once found. there will be our renewed particle:
                    // (the erase flag is re-checked inside the critical section so two
                    // threads cannot claim the same slot)
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }

                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    KRATOS_ERROR_IF_NOT( is_found ) << "In move shallow water particle utility: particle not found in domain" << std::endl;

                    // Interpolate the convected variables for the new particle
                    // directly from the eulerian mesh at its Gauss-point position.
                    mesh_scalar1 = 0.0;
                    mesh_vector1 = ZeroVector(3);
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        mesh_scalar1          += N(j,l) * geom[l].FastGetSolutionStepValue(*mScalarVar1);
                        noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(*mVectorVar1);
                    }
                    pparticle.GetScalar1()=mesh_scalar1;
                    pparticle.GetVector1()=mesh_vector1;
                    pparticle.GetEraseFlag()=false;

                    mParticlesVector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    number_of_particles_in_elem++;
                    // NOTE(review): keep_looking is always false here (the while loop
                    // above only exits once it is cleared), so this check can never
                    // fire; an exhausted pool would instead overrun mParticlesVector
                    // inside the search loop above — TODO confirm upstream.
                    KRATOS_ERROR_IF( keep_looking ) << "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!"
<< std::endl; reused_particles++; } } } } KRATOS_CATCH("") } /// Fill a model part with particles /** This function prints the particles to a model part * * @param rLagrangianModelPart: empty model part to print particles * @param FilterFactor: the function will print one particle of every "filter factor" */ void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor ) { KRATOS_TRY // We will only print one out of every "filter factor" particles of the total particle list if (mParticlePrintingToolInitialized == false) { KRATOS_ERROR_IF( rLagrangianModelPart.NodesBegin() - rLagrangianModelPart.NodesEnd() > 0 ) << "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl; rLagrangianModelPart.AddNodalSolutionStepVariable(*mScalarVar1); rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT); for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++) { Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! 
//pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mParticlePrintingToolInitialized=true; } // Resetting data of the unused particles const double inactive_particle_position = -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin(); for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(*mScalarVar1) = 0.0; inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter = 0; //ModelPart::NodesContainerType::iterator it_begin = rLagrangianModelPart.NodesBegin(); for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++) { ShallowParticle& pparticle = mParticlesVector[i]; if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0) { ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(*mScalarVar1) = pparticle.GetScalar1(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } protected: private: /// Move a particle /** this function moves a particle according to the velocity given * by VELOCITY variable. 
The movement is performed in nsubsteps,
 * during a total time of DELTA_TIME
 *
 * @param pParticle
 * @param pElement
 * @param rElementsInTrajectory
 * @param rNumberOfElementsInTrajectory
 * @param ResultBegin
 * @param MaxNumberOfResults
 *
 * @see MoveParticles
 */
void MoveParticle(ShallowParticle & pParticle,
                  Element::Pointer & pElement,
                  GlobalPointersVector< Element >& rElementsInTrajectory,
                  unsigned int & rNumberOfElementsInTrajectory,
                  ResultIteratorType ResultBegin,
                  const unsigned int MaxNumberOfResults)
{
    const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;

    bool keep_integrating = false;
    bool is_found;

    array_1d<double,3> vel;
    array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;

    //we start with the first position, then it will enter the loop.
    position = pParticle.Coordinates(); //initial coordinates

    double only_integral = 0.0 ;

    // Locate the particle in the mesh (updates pElement and the shape functions N).
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        keep_integrating=true;
        Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
        // Interpolate the mesh velocity at the particle position.
        vel=ZeroVector(3);
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }

        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);
        position += vel*substep_dt;//weight;

        // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
        unsigned int check_from_element_number = 0;
        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (keep_integrating == true)
            {
                // Re-locate the particle, reusing the trajectory element list as a hint.
                is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    only_integral += 1.0; //values saved for the current time step
                    position+=vel*substep_dt;//weight;
                }
                else
                {
                    keep_integrating=false;
                    break;
                }
            }
            else
                break;
        }
    }

    // If the integration was abandoned (the particle left the domain), flag it for deletion.
    if (keep_integrating == false)
        (pParticle.GetEraseFlag()=true);
    else
        is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)

    if (is_found == false) ( pParticle.GetEraseFlag()=true);

    pParticle.Coordinates() = position;
}

/// This function updates a particle
/** This function updates a particle variables using the "delta
 * variables" from the nodal database.
 *
 * @param pParticle
 * @param pElement
 * @param rGeom
 *
 * @see CorrectParticlesWithoutMovingUsingDeltaVariables
 */
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
                                        Element::Pointer & pElement,
                                        Geometry< Node<3> >& rGeom)
{
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
array_1d<double,3> coords = pParticle.Coordinates(); float & particle_scalar1 = pParticle.GetScalar1(); array_1d<float,3> & particle_vector1 = pParticle.GetVector1(); //double distance=0.0; double delta_scalar1 = 0.0; array_1d<double,3> delta_vector1 = ZeroVector(3); bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl; for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 ) N[j]=1e-10; } for(unsigned int j=0; j<(TDim+1); j++) { delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j]; noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1)*N[j]; } particle_scalar1 = particle_scalar1 + delta_scalar1; particle_vector1 = particle_vector1 + delta_vector1; } /// Move a particle in the inverse way /** this function moves a particle according to the -velocity given * by VELOCITY variable. The movement is performed by a backward * integration in nsubsteps, during a total time of DELTA_TIME * Before the particle goes out of the element, gets the value * of the eulerian mesh and stores it * * @param pParticle * @param pElement * @param ResultBegin * @param MaxNumberOfResults * * @see PreReseed */ void MoveParticleInverseWay(ShallowParticle & pParticle, Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool keep_integrating = false; bool is_found; double scalar1 = 0.0; array_1d<double,3> vector1; array_1d<double,3> vel; array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { keep_integrating = true; Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in scalar1 = 0.0; vector1 = ZeroVector(3); vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N[j]; noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j]; noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; } //calculating substep to get +- courant(substep) = 1/4 nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE)); if (nsubsteps<1) nsubsteps=1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0; // weight;//*double(nsubsteps); position -= vel*substep_dt; //weight; for(unsigned int i=0; i<(nsubsteps-1); i++) // this is for the substeps n+1. in the first one we already knew the position of the particle. 
{ if (keep_integrating == true) { is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if (is_found == true) { Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in scalar1 = 0.0; vector1 = ZeroVector(3); vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N(j); noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j]; noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; } only_integral += 1.0; //weight ; //values saved for the current time step position -= vel*substep_dt; //weight; } else keep_integrating = false; } } pParticle.GetScalar1() = scalar1; pParticle.GetVector1() = vector1; } } /// Find the element into which a given node is located /** This function should find the element into which a given node * is located and return a pointer to the element and the vector * containing the shape functions that define the positions within * the element. * If false is returned the element is not found * * @param position of the node * @param N: return shape functions that define the positions within the elem * @param pElement: return a pointer to the element * @param ResultBegin * @param MaxNumberOfResults * @return FindNodeOnMesh if the element is found of not * * @see CalculatePosition */ bool FindNodeOnMesh( const array_1d<double,3>& rPosition, array_1d<double,TDim+1>& N, Element::Pointer & pElement, ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; array_1d<double,TDim+1> aux_N; //before using the bin to search for possible elements we check first the last element in which the particle was. Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N); if (is_found_1) //that was easy! 
{
    return true;
}

// To begin with we check the neighbour elements; it is a bit more expensive
GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
    Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
    bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
    if (is_found_2)
    {
        pElement = neighb_elems[i].shared_from_this();
        return true;
    }
}

// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );

if (results_found>0)
{
    //loop over the candidate elements and check if the particle falls within
    for(SizeType i = 0; i< results_found; i++)
    {
        Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();

        //find local position
        bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);

        if (is_found_3)
        {
            pElement = (*(ResultBegin + i))->shared_from_this();
            return true;
        }
    }
}

//if nothing worked, then:
//not found case
return false;
}

/// Find the element into which a given node is located
/** This function should find the element into which a given node
 * is located and return a pointer to the element and the vector
 * containing the shape functions that define the positions within
 * the element.
 * If false is returned the element is not found
 * This version includes predefined elements following a trajectory
 *
 * @param rPosition of the node
 * @param N Output shape functions that define the positions within the elem
 * @param pElement Output a pointer to the element
 * @param rElementsInTrajectory
 * @param rNumberOfElementsInTrajectory Output
 * @param CheckFromElementNumber
 * @param ResultBegin
 * @param MaxNumberOfResults
 * @return FindNodeOnMesh if the element is found or not
 *
 * @see CalculatePosition
 */
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pElement,
                     GlobalPointersVector< Element >& rElementsInTrajectory,
                     unsigned int & rNumberOfElementsInTrajectory,
                     unsigned int & rCheckFromElementNumber,
                     ResultIteratorType ResultBegin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    //~ const array_1d<double,3>& coords = rPosition;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
    if(is_found_1 == true)
    {
        return true; //that was easy!
    }

    // If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
    for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
    {
        Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
        if (is_found_2)
        {
            pElement = rElementsInTrajectory[i].shared_from_this();
            N = aux_N;
            rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid checking twice the same element we send the counter to the following element.
            return true;
        }
    }

    // Now we check the neighbour elements:
    GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
        if (is_found_2)
        {
            pElement = neighb_elems[i].shared_from_this();
            // Record the element in the (bounded, 20-slot) trajectory list so the
            // next particles from the same start element can find it directly.
            if (rNumberOfElementsInTrajectory<20)
            {
                rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                rNumberOfElementsInTrajectory++;
                rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we wouldnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    // If checking all the neighbour elements did not work, we have to use the bins
    // ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );

    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();

            //find local position
            bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);

            if (is_found)
            {
                pElement = (*(ResultBegin + i))->shared_from_this();
                if (rNumberOfElementsInTrajectory<20)
                {
                    rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                    rNumberOfElementsInTrajectory++;
                    rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we wouldnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
    }

    //not found case
    return false;
}

/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
 * an element and returns the shape functions that define it position
 * within the element and returns false if the particle is outside
 * the element
 *
 * @param rGeom: the element (a triangle)
 * @param xc: the position of the particle
 * @param yc: the position of the particle
 * @param zc: the position of the particle
 * @param N: the shape functions to define the particle position
 *
 * @return CalculatePosition
 */
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc, const double yc, const double zc,
                               array_1d<double,3> & N )
{
    double x0 = rGeom[0].X();
    double y0 = rGeom[0].Y();
    double x1 = rGeom[1].X();
    double y1 = rGeom[1].Y();
    double x2 = rGeom[2].X();
    double y2 = rGeom[2].Y();

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;

    // Barycentric coordinates: sub-triangle area over total area.
    double inv_area = 1.0 / area;
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
 * an element and returns the shape functions that define it position
 * within the element and returns false if the particle is outside
 * the element
 *
 * @param rNodesPositions of the element (a triangle)
 * @param xc: the position of the particle
 * @param yc: the position of the particle
 * @param zc: the position of the particle
 * @param N: the shape
functions to define the particle position
 *
 * @return CalculatePosition
 */
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc, const double yc, const double zc,
                               array_1d<double,3> & N )
{
    // Packed coordinates: node j occupies slots [3*j .. 3*j+2] (x, y, z).
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;

    // Barycentric coordinates: sub-triangle area over total area.
    double inv_area = 1.0 / area;
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
 * an element and returns the shape functions that define it position
 * within the element and returns false if the particle is outside
 * the element
 *
 * @param rGeom: the element (a tetrahedron)
 * @param xc: the position of the particle
 * @param yc: the position of the particle
 * @param zc: the position of the particle
 * @param N: the shape functions to define the particle position
 *
 * @return CalculatePosition
 */
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc, const double yc, const double zc,
                               array_1d<double, 4 > & N )
{
    double x0 = rGeom[0].X();
    double y0 = rGeom[0].Y();
    double z0 = rGeom[0].Z();
    double x1 = rGeom[1].X();
    double y1 = rGeom[1].Y();
    double z1 = rGeom[1].Z();
    double x2 = rGeom[2].X();
    double y2 = rGeom[2].Y();
    double z2 = rGeom[2].Z();
    double x3 = rGeom[3].X();
    double y3 = rGeom[3].Y();
    double z3 = rGeom[3].Z();

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;

    // Barycentric coordinates: sub-tetrahedron volume over total volume.
    double inv_vol = 1.0 / vol;
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
        N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
 * an element and returns the shape functions that define it position
 * within the element and returns false if the particle is outside
 * the element
 *
 * @param rNodesPositions of the element (a tetrahedron)
 * @param xc: the position of the particle
 * @param yc: the position of the particle
 * @param zc: the position of the particle
 * @param N: the shape functions to define the particle position
 *
 * @return CalculatePosition
 */
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc, const double yc, const double zc,
                               array_1d<double, 4 > & N )
{
    // Packed coordinates: node j occupies slots [3*j .. 3*j+2] (x, y, z).
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& z0 = rNodesPositions[2];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& z1 = rNodesPositions[5];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];
    const double& z2 = rNodesPositions[8];
    const double& x3 = rNodesPositions[9];
    const double& y3 = rNodesPositions[10];
    const double& z3 = rNodesPositions[11];

    double vol = CalculateVol(x0,
y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl; double inv_vol = 1.0 / vol; N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /// Calculate the volume /** This function computes the area of a triangle */ inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } /// Calculate the volume /** This function computes the volume of a tetrahedron */ inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = 
two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); //third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } /// Compute the Gauss points /** For a triangle * * @see PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; 
N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see 
PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos, BoundedMatrix<double, 9, 4 > & N ) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** For a triangle * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos, BoundedMatrix<double, 3, 3 > & N ) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 
0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos, BoundedMatrix<double, 4, 4 > & N ) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos, BoundedMatrix<double, 45, 3 > & N ) { unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + 
N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos, BoundedMatrix<double, 15, 3 > & N ) //2D { unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos, BoundedMatrix<double, 20, 4 > & N ) //3D { double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. 
since it is a triangle, it means it will have 10 particles { for (unsigned int j=0; j!=(4-i);j++) { for (unsigned int k=0; k!=(4-i-j);k++) { N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); counter++; } } } } /// check function virtual int Check() { KRATOS_TRY Node<3>& rnode = *mrModelPart.NodesBegin(); KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mVectorVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mScalarVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode) return 0; KRATOS_CATCH("") } /// Member variables ModelPart& mrModelPart; int mNParticles; int mNElems; int mOffset; int mMaxSubSteps; double mMaxSubStepDt; int mMaxNumberOfParticles; std::vector< ShallowParticle > mParticlesVector; int mLastElemId; bool mOddTimeStep; bool mParticlePrintingToolInitialized; unsigned int mLastNodeId; DenseVector<int> mNumOfParticlesInElems; DenseVector<int> 
mNumOfParticlesInElemsAux; DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; const Variable<double>* mScalarVar1; const Variable<array_1d<double,3>>* mVectorVar1; std::string m_scalar_var1_name; std::string m_vector_var1_name; }; // class MoveShallowWaterParticleUtility } // namespace Kratos. #endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H namespace Eigen { namespace internal { /** \internal */ inline void manage_multi_threading(Action action, int* v) { static EIGEN_UNUSED int m_maxThreads = -1; if(action==SetAction) { eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; else *v = omp_get_max_threads(); #else *v = 1; #endif } else { eigen_internal_assert(false); } } } /** Must be call first when calling Eigen from multiple threads */ inline void initParallel() { int nbt; internal::manage_multi_threading(GetAction, &nbt); std::ptrdiff_t l1, l2; internal::manage_caching_sizes(GetAction, &l1, &l2); } /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; internal::manage_multi_threading(GetAction, &ret); return ret; } /** Sets the max number of threads reserved for Eigen * \sa nbThreads */ inline void setNbThreads(int v) { internal::manage_multi_threading(SetAction, &v); } namespace internal { template<typename Index> struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {} int volatile sync; int volatile users; Index rhs_start; Index rhs_length; }; template<bool Condition, typename Functor, typename Index> void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose) { // TODO when EIGEN_USE_BLAS is defined, // we should still enable OMP for other scalar types #if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS) // FIXME the transpose variable is 
only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. This whole // parallelizer mechanism has to be redisigned anyway. EIGEN_UNUSED_VARIABLE(transpose); func(0,rows, 0,cols); #else // Dynamically check whether we should enable or disable OpenMP. // The conditions are: // - the max number of threads we can create is greater than 1 // - we are not already in a parallel code // - the sizes are large enough // 1- are we already in a parallel session? // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? if((!Condition) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); Index size = transpose ? cols : rows; // 2- compute the maximal number of threads from the size of the product: // FIXME this has to be fine tuned Index max_threads = std::max<Index>(1,size / 32); // 3 - compute the number of threads we are going to use Index threads = std::min<Index>(nbThreads(), max_threads); if(threads==1) return func(0,rows, 0,cols); Eigen::initParallel(); func.initParallelSession(); if(transpose) std::swap(rows,cols); GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads]; #pragma omp parallel num_threads(threads) { Index i = omp_get_thread_num(); // Note that the actual number of threads might be lower than the number of request ones. Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads) & ~Index(0x7); Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? 
cols-c0 : blockCols; info[i].rhs_start = c0; info[i].rhs_length = actualBlockCols; if(transpose) func(0, cols, r0, actualBlockRows, info); else func(r0, actualBlockRows, 0,cols, info); } delete[] info; #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
GB_binop__fmod_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__fmod_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__fmod_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp64) // C=scalar+B GB (_bind1st__fmod_fp64) // C=scalar+B' GB (_bind1st_tran__fmod_fp64) // C=A+scalar GB (_bind2nd__fmod_fp64) // C=A'+scalar GB (_bind2nd_tran__fmod_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmod (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = fmod (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FMOD || GxB_NO_FP64 || GxB_NO_FMOD_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__fmod_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__fmod_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__fmod_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__fmod_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__fmod_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this span begins with the tail of GB(_AemultB_02__fmod_fp64),
// whose header lies above this chunk; it is reproduced unchanged.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Auto-generated kernel: the actual loop body comes from the included
// template, specialized via the GB_* macros defined earlier in this file.
GrB_Info GB (_AemultB_03__fmod_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,         // true if M is structural (values ignored)
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__fmod_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,           // true if the mask is complemented (<!M>)
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = fmod (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__fmod_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = fmod (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = fmod (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__fmod_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = fmod (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = fmod (x, aij) ;       \
}

GrB_Info GB (_bind1st_tran__fmod_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use of the template
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = fmod (aij, y) ;       \
}

GrB_Info GB (_bind2nd_tran__fmod_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_realloc_def_fb.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <omp.h> int main() { omp_alloctrait_t at[2]; omp_allocator_handle_t a; omp_allocator_handle_t f_a; void *ptr[2]; void *nptr[2]; at[0].key = omp_atk_pool_size; at[0].value = 2 * 1024 * 1024; at[1].key = omp_atk_fallback; at[1].value = omp_atv_default_mem_fb; a = omp_init_allocator(omp_large_cap_mem_space, 2, at); f_a = omp_init_allocator(omp_default_mem_space, 2, at); printf("allocator large created: %p\n", (void *)a); printf("allocator default created: %p\n", (void *)f_a); #pragma omp parallel num_threads(2) { int i = omp_get_thread_num(); ptr[i] = omp_alloc(1024 * 1024, f_a); #pragma omp barrier nptr[i] = omp_realloc(ptr[i], 1024 * 1024, a, f_a); #pragma omp barrier printf("th %d, nptr %p\n", i, nptr[i]); omp_free(nptr[i], a); } // Both pointers should be non-NULL if (nptr[0] != NULL && nptr[1] != NULL) { printf("passed\n"); return 0; } else { printf("failed: pointers %p %p\n", nptr[0], nptr[1]); return 1; } }
LinearSolvers.h
#ifndef USE_EIGEN #define USE_EIGEN 0 #endif // USE_EIGEN #ifndef USE_CHOLMOD #define USE_CHOLMOD 0 #endif // USE_CHOLMOD #if USE_CHOLMOD #pragma message( "[WARNING] Need to explicitly exclude VCOMP.lib" ) #include <Cholmod/cholmod.h> #pragma comment( lib , "CHOLMOD.lib" ) #define DLONG #ifdef DLONG typedef long long SOLVER_LONG; #define CHOLMOD( name ) cholmod_l_ ## name #else // !DLONG typedef int SOLVER_LONG; #define CHOLMOD( name ) cholmod_ ## name #endif // DLONG #endif // USE_CHOLMOD #include <Util/SparseMatrix.h> double SquareNorm( const double* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[i] * values[i] ; return norm2; } double SquareNorm( const float* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[i] * values[i] ; return norm2; } template< class Type > double SquareNorm( const Type* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[dim].squareNorm() ; return norm2 ; } double SquareDifference( const double* values1 , const double* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[i] - values2[i] ) * ( values1[i] - values2[i] ) ; return norm2; } double SquareDifference( const float* values1 , const float* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[i] - values2[i] ) * ( values1[i] - values2[i] ) ; return norm2; } template< class Type > double SquareDifference( const Type* values1 , const Type* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[dim] - values2[dim] ).squareNorm() ; return norm2 ; } // This is the conjugate gradients solver. // The assumption is that the class SPDOperator defines a method operator()( const Real* , Real* ) which corresponds to applying a symmetric positive-definite operator. 
template< class Real > struct CGScratch { Real *r , *d , *q; CGScratch( void ) : r(NULL) , d(NULL) , q(NULL) , _dim(0){ ; } CGScratch( int dim ) : r(NULL) , d(NULL) , q(NULL){ resize(dim); } ~CGScratch( void ){ resize(0); } void resize( int dim ) { if( dim!=_dim ) { if( r ) delete[] r ; r = NULL; if( d ) delete[] d ; d = NULL; if( q ) delete[] q ; q = NULL; if( dim ) r = new Real[dim] , d = new Real[dim] , q = new Real[dim]; _dim = dim; } } protected: int _dim; }; template< class Real > struct PreconditionedCGScratch : public CGScratch< Real > { Real *s; PreconditionedCGScratch( void ) : CGScratch< Real >() , s(NULL){ ; } PreconditionedCGScratch( int dim ) : CGScratch< Real >() { resize(dim); } ~PreconditionedCGScratch( void ){ resize(0); } void resize( int dim ) { if( dim!=CGScratch< Real >::_dim ) { if( s ) delete[] s; s = NULL; if( dim ) s = new Real[dim]; } CGScratch< Real >::resize( dim ); } }; template< class Real > struct DiagonalPreconditioner { Real* iDiagonal; DiagonalPreconditioner( void ) : iDiagonal(NULL) , _dim(0){ ; } ~DiagonalPreconditioner( void ){ if( iDiagonal ) delete[] iDiagonal ; iDiagonal = NULL; } void set( const SparseMatrix< Real , int >& M ) { if( _dim!=M.Rows() ) { _dim = (int)M.Rows(); if( iDiagonal ) delete[] iDiagonal , iDiagonal = NULL; if( _dim>0 ) iDiagonal = new Real[_dim]; } memset( iDiagonal , 0 , sizeof(Real)*_dim ); #pragma omp parallel for for( int i=0 ; i<M.Rows() ; i++ ) { for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( M[i][j].N==i ) iDiagonal[i] += M[i][j].Value; iDiagonal[i] = (Real)1./iDiagonal[i]; } } void operator()( const Real* in , Real* out ) const { #pragma omp parallel for for( int i=0 ; i<_dim ; i++ ) out[i] = in[i] * iDiagonal[i]; } protected: int _dim; }; template< class Real , class SPDOperator > int SolveCG( SPDOperator& L , int iters , int dim , const Real* b , Real* x , CGScratch< Real >* scratch=NULL , double eps=1e-8 , int threads=1 , bool verbose=false ) { eps *= eps; Real *r , *d , *q; if( scratch ) r = 
scratch->r , d = scratch->d , q = scratch->q; else r = new Real[dim] , d = new Real[dim] , q = new Real[dim]; memset( r , 0 , sizeof(Real)*dim ) , memset( d , 0 , sizeof(Real)*dim ) , memset( q , 0 , sizeof(Real)*dim ); double delta_new = 0 , delta_0; L( x , r ); #pragma omp parallel for num_threads( threads ) reduction( + : delta_new ) for( int i=0 ; i<dim ; i++ ) d[i] = r[i] = b[i] - r[i] , delta_new += r[i] * r[i]; delta_0 = delta_new; if( delta_new<eps ) { if( !scratch ) delete[] r , delete[] d , delete[] q; return 0; } int ii; for( ii=0 ; ii<iters && delta_new>eps*delta_0 ; ii++ ) { L( d , q ); double dDotQ = 0; #pragma omp parallel for num_threads( threads ) reduction( + : dDotQ ) for( int i=0 ; i<dim ; i++ ) dDotQ += d[i] * q[i]; Real alpha = Real( delta_new / dDotQ ); double delta_old = delta_new; delta_new = 0; const int RESET_COUNT = 50; if( (ii%RESET_COUNT)==(RESET_COUNT-1) ) { #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) x[i] += d[i] * alpha; L( x , r ); #pragma omp parallel for num_threads( threads ) reduction ( + : delta_new ) for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i] , delta_new += r[i] * r[i]; } else #pragma omp parallel for num_threads( threads ) reduction( + : delta_new ) for( int i=0 ; i<dim ; i++ ) r[i] -= q[i] * alpha , delta_new += r[i] * r[i] , x[i] += d[i] * alpha; Real beta = Real( delta_new / delta_old ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) d[i] = r[i] + d[i] * beta; } if( verbose ) { L( x , r ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) r[i] -= b[i]; printf( "CG: %d %g -> %g\n" , ii , SquareNorm( b , dim ) , SquareNorm( r , dim ) ); } if( !scratch ) delete[] r , delete[] d , delete[] q; return ii; } template< class Real , class SPDOperator , class SPDPreconditioner > int SolvePreconditionedCG( SPDOperator& L , SPDPreconditioner& Pinverse , int iters , int dim , const Real* b , Real* x , PreconditionedCGScratch< Real >* 
scratch=NULL , double eps=1e-8 , int threads=1 , bool verbose=false ) { eps *= eps; Real *r , *d , *q , *s; if( scratch ) r = scratch->r , d = scratch->d , q = scratch->q , s = scratch->s; else r = new Real[dim] , d = new Real[dim] , q = new Real[dim] , s = new Real[dim]; memset( r , 0 , sizeof(Real)*dim ) , memset( d , 0 , sizeof(Real)*dim ) , memset( q , 0 , sizeof(Real)*dim ) , memset( s , 0 , sizeof(Real)*dim ); double delta_new = 0 , delta_0; L( x , r ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i]; Pinverse( r , d ); #pragma omp parallel for num_threads( threads ) reduction( + : delta_new ) for( int i=0 ; i<dim ; i++ ) delta_new += r[i] * d[i]; delta_0 = delta_new; if( delta_new<eps ) { if( !scratch ) delete[] r , delete[] d , delete[] q; return 0; } int ii; for( ii=0 ; ii<iters && delta_new>eps*delta_0 ; ii++ ) { L( d , q ); double dDotQ = 0; #pragma omp parallel for num_threads( threads ) reduction( + : dDotQ ) for( int i=0 ; i<dim ; i++ ) dDotQ += d[i] * q[i]; Real alpha = Real( delta_new / dDotQ ); const int RESET_COUNT = 50; #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) x[i] += d[i] * alpha; if( (ii%RESET_COUNT)==(RESET_COUNT-1) ) { L( x , r ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i]; } else #pragma omp parallel for num_threads( threads ) reduction( + : delta_new ) for( int i=0 ; i<dim ; i++ ) r[i] -= q[i] * alpha; Pinverse( r , s ); double delta_old = delta_new; delta_new = 0; #pragma omp parallel for num_threads( threads ) reduction( + : delta_new ) for( int i=0 ; i<dim ; i++ ) delta_new += r[i] * s[i]; Real beta = Real( delta_new / delta_old ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) d[i] = s[i] + d[i] * beta; } if( verbose ) { L( x , r ); #pragma omp parallel for num_threads( threads ) for( int i=0 ; i<dim ; i++ ) r[i] -= b[i]; printf( "PCCG: %d %g -> %g\n" , ii , SquareNorm( b 
, dim ) , SquareNorm( r , dim ) ); } if( !scratch ) delete[] r , delete[] d , delete[] q , delete[] s; return ii; } #if USE_EIGEN #include <Eigen/Sparse> class EigenCholeskySolver { typedef Eigen::SimplicialLLT< Eigen::SparseMatrix< double > > Eigen_Solver; typedef Eigen::VectorXd Eigen_Vector; Eigen_Solver _solver; Eigen_Vector _eigenB , _eigenX; public: template< class Real > EigenCholeskySolver( const SparseMatrix< Real , int >& M ) { Eigen::SparseMatrix< double > eigenM( int( M.Rows() ) , int( M.Rows() ) ); std::vector< Eigen::Triplet<double> > triplets; triplets.reserve( M.Entries() ); for( int i=0 ; i<M.Rows() ; i++ ) for( int j=0 ; j<M.RowSize(i) ; j++ ) triplets.push_back( Eigen::Triplet< double >( i , M[i][j].N , M[i][j].Value ) ); eigenM.setFromTriplets( triplets.begin() , triplets.end() ); _solver.analyzePattern( eigenM ); _solver.factorize( eigenM ); if( _solver.info()!=Eigen::Success ) fprintf( stderr , "[ERROR] EigenSovler::EigenCholeskySolver Failed to factorize matrix\n" ) , exit(0); _eigenB.resize( M.Rows() ) , _eigenX.resize( M.Rows() ); } template< class Real > void solve( const Real* b , Real* x ) { #pragma omp parallel for for( int i=0 ; i<_eigenB.size() ; i++ ) _eigenB[i] = b[i]; _eigenX = _solver.solve( _eigenB ); #pragma omp parallel for for( int i=0 ; i<_eigenX.size() ; i++ ) x[i] = _eigenX[i]; } size_t dimension( void ) const { return _eigenB.size(); } template< class Real > static void Solve( const SparseMatrix< Real , int >& M , const Real* b , Real* x ){ EigenCholeskySolver solver( M ) ; solver.solve( b , x ); } }; class EigenCGSolver { Eigen::ConjugateGradient< Eigen::SparseMatrix< double > > _solver; Eigen::VectorXd _eigenB , _eigenX; Eigen::SparseMatrix< double > _eigenM; public: template< class Real > EigenCGSolver( const SparseMatrix< Real , int >& M ) { _eigenM.resize( (int)M.Rows() , (int)M.Rows() ); std::vector< Eigen::Triplet<double> > triplets; triplets.reserve( M.Entries() ); for( int i=0 ; i<M.Rows() ; i++ ) for( int j=0 ; 
j<M.RowSize(i) ; j++ ) triplets.push_back( Eigen::Triplet< double >( i , M[i][j].N , M[i][j].Value ) ); _eigenM.setFromTriplets( triplets.begin() , triplets.end() ); _solver.compute( _eigenM ); _solver.analyzePattern( _eigenM ); if( _solver.info()!=Eigen::Success ) fprintf( stderr , "[ERROR] EigenSovler::EigenCGSolver Failed to factorize matrix\n" ) , exit(0); _eigenB.resize( M.Rows() ) , _eigenX.resize( M.Rows() ); } template< class Real > void solve( const Real* b , Real* x , int iters ) { _solver.setMaxIterations( iters ); #pragma omp parallel for for( int i=0 ; i<_eigenB.size() ; i++ ) _eigenB[i] = b[i] , _eigenX[i] = x[i]; _eigenX = _solver.solveWithGuess( _eigenB , _eigenX ); #pragma omp parallel for for( int i=0 ; i<_eigenX.size() ; i++ ) x[i] = _eigenX[i]; } size_t dimension( void ) const { return _eigenB.size(); } template< class Real > static void Solve( const SparseMatrix< Real , int >& M , const Real* b , Real* x , int iters ){ EigenCGSolver solver( M ) ; solver._solver.setMaxIterations( iters ) ; solver.solve( b , x ); } }; #endif // USE_EIGEN #if USE_CHOLMOD class CholmodSolver { const static bool LOWER_TRIANGULAR = true; int dim; cholmod_factor* cholmod_L; cholmod_dense* cholmod_b; cholmod_sparse* cholmod_M; std::vector< bool > flaggedValues; template< class Real > void _init( const SparseMatrix< Real , int >& M ); template< class Real > bool _update( const SparseMatrix< Real , int >& M ); public: static cholmod_common cholmod_C; static bool cholmod_C_set; template< class Real > CholmodSolver( const SparseMatrix< Real , int >& M ); ~CholmodSolver( void ); template< class Real > void solve( ConstPointer( Real ) b , Pointer( Real ) x ); int nonZeros( void ) const; }; bool CholmodSolver::cholmod_C_set = false; cholmod_common CholmodSolver::cholmod_C; template< class Real > CholmodSolver::CholmodSolver( const SparseMatrix< Real , int >& M ){ _init( M ) , _update( M ); } template< class Real > void CholmodSolver::_init( const SparseMatrix< Real , int >& M 
) { { if( !cholmod_C_set ) CHOLMOD(start)( &cholmod_C ); cholmod_C_set = true; } dim = M.rows; int maxEntries; if( LOWER_TRIANGULAR ) { maxEntries = (int)( ( M.Entries()-M.rows ) / 2 + M.rows ); cholmod_M = CHOLMOD(allocate_sparse)( dim , dim , maxEntries , 0 , 1 , -1 , CHOLMOD_REAL , &cholmod_C ); } else { maxEntries = (int)M.Entries(); cholmod_M = CHOLMOD(allocate_sparse)( dim , dim , maxEntries , 0 , 1 , 0 , CHOLMOD_REAL , &cholmod_C ); } cholmod_M->i = malloc( sizeof( SOLVER_LONG ) * maxEntries ); cholmod_M->x = malloc( sizeof( double ) * maxEntries ); SOLVER_LONG *_p = (SOLVER_LONG*)cholmod_M->p; SOLVER_LONG *_i = (SOLVER_LONG*)cholmod_M->i; int off = 0; dim = 0; for( int i=0 ; i<M.rows ; i++ ) { _p[dim++] = off; for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( !LOWER_TRIANGULAR || M[i][j].N>=i ) _i[off++] = M[i][j].N; } _p[dim] = off; cholmod_L = CHOLMOD(analyze)( cholmod_M , &cholmod_C ); cholmod_b = CHOLMOD(allocate_dense)( dim , 1 , dim , cholmod_M->xtype , &cholmod_C ); } template< class Real > bool CholmodSolver::_update( const SparseMatrix< Real , int >& M ) { double *_x = (double*)cholmod_M->x; int off = 0; SOLVER_LONG *_p = (SOLVER_LONG*)cholmod_M->p; #pragma omp parallel for for( int i=0 ; i<M.rows ; i++ ) { int off = (int)_p[i]; for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( !LOWER_TRIANGULAR || M[i][j].N>=i ) _x[off++] = double( M[i][j].Value ); } cholmod_C.print = 0; CHOLMOD(factorize)( cholmod_M , cholmod_L , &cholmod_C ); if( cholmod_C.status==CHOLMOD_NOT_POSDEF ) { fprintf( stderr , "[WARNING] Matrix not positive-definite\n" ); return false; } else if( cholmod_C.status==CHOLMOD_OUT_OF_MEMORY ) { fprintf( stderr , "[WARNING] CHOLMOD ran out of memory\n" ); return false; } else if( cholmod_C.status!=CHOLMOD_OK ) { fprintf( stderr , "[WARNING] CHOLMOD status not OK: %d\n" , cholmod_C.status ); return false; } return true; } CholmodSolver::~CholmodSolver( void ) { if( cholmod_L ) CHOLMOD(free_factor)( &cholmod_L , &cholmod_C ) , cholmod_L = NULL; if( 
cholmod_b ) CHOLMOD(free_dense )( &cholmod_b , &cholmod_C ) , cholmod_b = NULL; if( cholmod_M ) CHOLMOD(free_sparse)( &cholmod_M , &cholmod_C ) , cholmod_M = NULL; } template< class Real > void CholmodSolver::solve( ConstPointer( Real ) b , Pointer( Real ) x ) { double* _b = (double*)cholmod_b->x; for( int i=0 ; i<dim ; i++ ) _b[i] = (double)b[i]; cholmod_dense* cholmod_x = CHOLMOD(solve)( CHOLMOD_A , cholmod_L , cholmod_b , &cholmod_C ); double* _x = (double*)cholmod_x->x; for( int i=0 ; i<dim ; i++ ) x[i] = (Real)_x[i]; CHOLMOD(free_dense)( &cholmod_x , &cholmod_C ); } int CholmodSolver::nonZeros( void ) const { long long nz = 0; if( cholmod_L->xtype != CHOLMOD_PATTERN && !(cholmod_L->is_super ) ) for( int i=0 ; i<cholmod_L->n ; i++ ) nz += ((SOLVER_LONG*)cholmod_L->nz)[i]; bool examine_super = false; if( cholmod_L->xtype != CHOLMOD_PATTERN ) examine_super = true ; else examine_super = ( ((int*)cholmod_L->s)[0] != (-1)); if( examine_super ) { /* check and print each supernode */ for (int s = 0 ; s < cholmod_L->nsuper ; s++) { int k1 = ((int*)cholmod_L->super) [s] ; int k2 = ((int*)cholmod_L->super) [s+1] ; int psi = ((int*)cholmod_L->pi)[s] ; int psend = ((int*)cholmod_L->pi)[s+1] ; int nsrow = psend - psi ; int nscol = k2 - k1 ; nz += nscol * nsrow - (nscol*nscol - nscol)/2 ; } } return (int)nz; } #endif // USE_CHOLMOD
mandel-omp-taskgroup.c
/*
 * Sequential Mandelbrot program
 *
 * This program computes and displays all or part of the Mandelbrot
 * set.  By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (diplay window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif

#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N           2           /* size of problem space (x, y from -N to N) */
#define NPIXELS     800         /* size of display window in pixels */

/* NOTE(review): these globals are shadowed by the loop-local row/col below and
 * are effectively unused; kept for source compatibility. */
int row, col;

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"     /* has setup(), interact() */
#endif

/* Compute the Mandelbrot iteration count for every pixel of a height x width
 * image covering the region anchored at (real_min, imag_min) with per-pixel
 * steps (scale_real, scale_imag).  Rows are traversed sequentially; within a
 * row, one OpenMP task is spawned per pixel and the taskgroup waits for the
 * row to finish before moving on.  Results are either drawn to the X display
 * (_DISPLAY_ builds) or stored in output[row][col]. */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return, Display *display, Window win, GC gc, double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel
    #pragma omp single
    for (int row = 0; row < height; ++row) {
        #pragma omp taskgroup
        for (int col = 0; col < width; ++col) {
            #pragma omp task firstprivate(row, col)
            {
                complex z, c;
                z.real = z.imag = 0;

                /* Scale display coordinates to actual region */
                c.real = real_min + ((double) col * scale_real);
                c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                        /* height-1-row so y axis displays
                                         * with larger values at top */

                /* Calculate z0, z1, .... until divergence or maximum iterations */
                int k = 0;
                double lengthsq, temp;
                do {
                    temp = z.real*z.real - z.imag*z.imag + c.real;
                    z.imag = 2*z.real*z.imag + c.imag;
                    z.real = temp;
                    lengthsq = z.real*z.real + z.imag*z.imag;
                    ++k;
                } while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
                /* Scale color and display point; X11 calls are not
                 * thread-safe, so serialize them. */
                long color = (long) ((k-1) * scale_color) + min_color;
                if (setup_return == EXIT_SUCCESS) {
                    #pragma omp critical
                    {
                        XSetForeground (display, gc, color);
                        XDrawPoint (display, win, gc, col, row);
                    }
                }
#else
                output[row][col]=k;
#endif
            }
        }
    }
}

int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;         /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        } else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;
        } else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("parallel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        } else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* FIX: allocations are now checked (they were previously used unchecked). */
    output = malloc(height*sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Unable to allocate image buffer\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width*sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Unable to allocate image buffer\n");
            return EXIT_FAILURE;
        }
    }
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != (size_t)width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        /* FIX: the file was previously never closed, so buffered data could be lost. */
        fclose(fp);
    }
    /* FIX: the image buffer was previously leaked. */
    for (int row = 0; row < height; ++row) free(output[row]);
    free(output);
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#endif
    /* FIX: the non-display path previously fell off the end of main. */
    return EXIT_SUCCESS;
}
omp_xengine.c
/* Perform the outer product summation (X-engine)

   This only fills in the lower triangular matrix and maps the 1-d baseline
   index to the (2-d) triangular index from the formula

     row = floor(-0.5 + sqrt(0.25 + 2*k));
     column = k - row*(row+1)/2

   Not meant to be fast, rather to work in a similar fashion as the GPU
   implementation.
*/

#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "xgpu.h"
#include "xgpu_info.h"

/* acc += z0 * conj(z1), accumulated in float:
   real: z0.r*z1.r + z0.i*z1.i ; imag: z0.i*z1.r - z0.r*z1.i */
#define cxmac(acc,z0,z1) \
  do { \
    acc.real += (float)z0.real * (float)z1.real + (float)z0.imag * (float)z1.imag; \
    acc.imag += (float)z0.imag * (float)z1.real - (float)z0.real * (float)z1.imag; \
  } while (0)

#ifndef COMPLEX_BLOCK_SIZE
#define COMPLEX_BLOCK_SIZE 1
#elif COMPLEX_BLOCK_SIZE != 1 && COMPLEX_BLOCK_SIZE != 32
#error COMPLEX_BLOCK_SIZE must be 1 or 32
#endif

/* CPU reference X-engine: for every (frequency, baseline) pair, accumulate the
 * four polarization products (XX, XY, YX, YY) over NTIME samples into
 * matrix_h.  One output group of 4 Complex values is written per loop index i.
 * NOTE(review): reads array_h and the NFREQUENCY/NBASELINE/NSTATION/NPOL/NTIME
 * constants from xgpu_info.h — layout assumptions below should be verified
 * against the GPU kernel. */
void xgpuOmpXengine(Complex *matrix_h, ComplexInput *array_h) {

#ifdef _OPENMP
  /* num_procs only exists (and the pragma only takes effect) when OpenMP is
   * enabled; without OpenMP the pragma is ignored by the compiler. */
  int num_procs = omp_get_num_procs();
#endif

#pragma omp parallel num_threads(num_procs)
  {
    int i, t;
#pragma omp for schedule(dynamic)
    for(i=0; i<NFREQUENCY*NBASELINE; i++){
      int f = i/NBASELINE;
      int k = i - f*NBASELINE;
      /* invert the triangular index: k -> (station1, station2), station2 <= station1 */
      int station1 = -0.5 + sqrt(0.25 + 2*k);
      int station2 = k - ((station1+1)*station1)/2;

      /* one accumulator per polarization product */
      Complex sumXX; sumXX.real = 0.0; sumXX.imag = 0.0;
      Complex sumXY; sumXY.real = 0.0; sumXY.imag = 0.0;
      Complex sumYX; sumYX.real = 0.0; sumYX.imag = 0.0;
      Complex sumYY; sumYY.real = 0.0; sumYY.imag = 0.0;

      ComplexInput inputRowX, inputRowY, inputColX, inputColY;

      for(t=0; t<NTIME; t++){
#if COMPLEX_BLOCK_SIZE == 1
        /* interleaved layout: [time][freq][station][pol] */
        inputRowX = array_h[((t*NFREQUENCY + f)*NSTATION + station1)*NPOL];
        inputRowY = array_h[((t*NFREQUENCY + f)*NSTATION + station1)*NPOL + 1];
        inputColX = array_h[((t*NFREQUENCY + f)*NSTATION + station2)*NPOL];
        inputColY = array_h[((t*NFREQUENCY + f)*NSTATION + station2)*NPOL + 1];
#else
        /* Probably not the cleanest way to do this...
         * block-of-32 layout: real and imaginary parts live 16 elements apart
         * within each 32-element block. */
        int i1 = ((t*NFREQUENCY + f)*NSTATION + station1)*NPOL;
        int i2 = ((t*NFREQUENCY + f)*NSTATION + station2)*NPOL;
        i1 = 32*(i1/32) + ((i1/2)%16);
        i2 = 32*(i2/32) + ((i2/2)%16);
        ComplexInput rowXYreal = array_h[i1];
        ComplexInput rowXYimag = array_h[i1+16];
        ComplexInput colXYreal = array_h[i2];
        ComplexInput colXYimag = array_h[i2+16];
        inputRowX.real = rowXYreal.real;
        inputRowX.imag = rowXYimag.real;
        inputRowY.real = rowXYreal.imag;
        inputRowY.imag = rowXYimag.imag;
        inputColX.real = colXYreal.real;
        inputColX.imag = colXYimag.real;
        inputColY.real = colXYreal.imag;
        inputColY.imag = colXYimag.imag;
#endif
        cxmac(sumXX, inputRowX, inputColX);
        cxmac(sumXY, inputRowX, inputColY);
        cxmac(sumYX, inputRowY, inputColX);
        cxmac(sumYY, inputRowY, inputColY);
      }

      /* four consecutive Complex outputs per (freq, baseline) index */
      matrix_h[4*i ] = sumXX;
      matrix_h[4*i + 1] = sumXY;
      matrix_h[4*i + 2] = sumYX;
      matrix_h[4*i + 3] = sumYY;
      // fprintf(stdout,"OUTER:%f %f\n",crealf(matrix_h[4*i]), cimag(matrix_h[4*i]));
    } //end parallel for loop
  } //end parallel segment
}