source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__minus_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// Each kernel below is a thin shell: it sets up typed locals and then
// #include's a shared template (GB_*_template.c / GB_*_meta.c) that is
// specialized via the GB_* macros defined in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_uint8)
// A*D function (colscale):         GB (_AxD__minus_uint8)
// D*A function (rowscale):         GB (_DxB__minus_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_uint8)
// C=scalar+B                       GB (_bind1st__minus_uint8)
// C=scalar+B'                      GB (_bind1st_tran__minus_uint8)
// C=A+scalar                       GB (_bind2nd__minus_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__minus_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// NOTE: unlike the kernels below, this one returns void and has no
// GB_DISABLE guard.
void GB (_Cdense_ewise3_accum__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block always returns above;
    // harmless dead code produced by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Ab == NULL means all present)
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = Ax [pA] ;             \
    Cx [pC] = (x - aij) ;               \
}

GrB_Info GB (_bind1st_tran__minus_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = Ax [pA] ;             \
    Cx [pC] = (aij - y) ;               \
}

GrB_Info GB (_bind2nd_tran__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GCTFoundationClangConstants.h
// // GCTFoundationClangConstants.h // GCTFoundationDemo // // Created by Later on 2018/2/20. // Copyright © 2018年 GEEKCODE.In. All rights reserved. // #ifndef GCTFoundationClangConstants_h #define GCTFoundationClangConstants_h #pragma mark - Clang #define GCTArgumentToString(macro) #macro #define GCTClangWarningConcat(warning_name) ArgumentToString(clang diagnostic ignored warning_name) // 参数可直接传入 clang 的 warning 名,warning 列表参考:http://fuckingclangwarnings.com/ #define GCTIgnoreClangWarningBegin(warningName) _Pragma("clang diagnostic push") _Pragma(ClangWarningConcat(#warningName)) #define GCTIgnoreClangWarningEnd _Pragma("clang diagnostic pop") //-WCFString-literal input conversion stopped due to an input byte that does not belong to the input codeset UTF-8 #define GCT_IGNORE_WCFSTRING_LITERAL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-WCFString-literal) #define GCT_IGNORE_WCFSTRING_LITERAL_WARNING_END GCTIgnoreClangWarningEnd //-WNSObject-attribute __attribute ((NSObject)) may be put on a typedef only, attribute is ignored #define GCT_IGNORE_WNSOBJECT_ATTRIBUTE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-WNSObject-attribute) #define GCT_IGNORE_WNSOBJECT_ATTRIBUTE_WARNING_END GCTIgnoreClangWarningEnd //-Wabstract-vbase-init initializer for virtual base class %0 of abstract class %1 will never be used #define GCT_IGNORE_WABSTRACT_VBASE_INIT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wabstract-vbase-init) #define GCT_IGNORE_WABSTRACT_VBASE_INIT_WARNING_END GCTIgnoreClangWarningEnd //-Waddress-of-array-temporary pointer is initialized by a temporary array, which will be destroyed at the end of the full-expression #define GCT_IGNORE_WADDRESS_OF_ARRAY_TEMPORARY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Waddress-of-array-temporary) #define GCT_IGNORE_WADDRESS_OF_ARRAY_TEMPORARY_WARNING_END GCTIgnoreClangWarningEnd //-Warc-maybe-repeated-use-of-weak "weak %select{variable|property|implicit property|instance variable}0 %1 may be accessed multiple times in this 
%select{function|method|block|lambda}2 and may be unpredictably set to nil assign to a strong variable to keep the object alive #define GCT_IGNORE_MAYBE_REPEATED_USE_OF_WEAK_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-maybe-repeated-use-of-weak) #define GCT_IGNORE_MAYBE_REPEATED_USE_OF_WEAK_WARNING_END GCTIgnoreClangWarningEnd //-Warc-non-pod-memaccess %select{destination for|source of}0 this %1 call is a pointer to ownership-qualified type %2 #define GCT_IGNORE_NON_POD_MEMACCESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-non-pod-memacces) #define GCT_IGNORE_NON_POD_MEMACCESS_WARNING_END GCTIgnoreClangWarningEnd //-Warc-performSelector-leaks performSelector may cause a leak because its selector is unknown #define GCT_IGNORE_PERFORM_SEL_LEAKS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-performSelector-leaks) #define GCT_IGNORE_PERFORM_SEL_LEAKS_WARNING_END GCTIgnoreClangWarningEnd //-Warc-repeated-use-of-weak weak %select{variable|property|implicit property|instance variable}0 %1 is accessed multiple times in this %select{function|method|block|lambda}2 but may be unpredictably set to nil assign to a strong variable to keep the object alive #define GCT_IGNORE_REPEATED_USE_OF_WEAK_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-repeated-use-of-weak) #define GCT_IGNORE_REPEATED_USE_OF_WEAK_WARNING_END GCTIgnoreClangWarningEnd //-Warc-retain-cycles capturing %0 strongly in this block is likely to lead to a retain cycle #define GCT_IGNORE_RETAIN_CYCLES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-retain-cycles) #define GCT_IGNORE_RETAIN_CYCLES_WARNING_END GCTIgnoreClangWarningEnd //-Warc-unsafe-retained-assign assigning retained object to unsafe property object will be released after assignment //-Warc-unsafe-retained-assign assigning %select{array literal|dictionary literal|numeric literal|boxed expression|should not happen|block literal}0 to a weak %select{property|variable}1 object will be released after assignment //-Warc-unsafe-retained-assign assigning 
retained object to %select{weak|unsafe_unretained}0 %select{property|variable}1 object will be released after assignment #define GCT_IGNORE_UNSAFE_RETAINED_ASSIGN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-unsafe-retained-assign) #define GCT_IGNORE_UNSAFE_RETAINED_ASSIGN_WARNING_END GCTIgnoreClangWarningEnd //-Warray-bounds array index %0 is past the end of the array (which contains %1 element%s2) //-Warray-bounds array index %0 is before the beginning of the array //-Warray-bounds 'static' has no effect on zero-length arrays //-Warray-bounds array argument is too small contains %0 elements, callee requires at least %1 #define GCT_IGNORE_WARRAY_BOUNDS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warray-bounds) #define GCT_IGNORE_WARRAY_BOUNDS_WARNING_END GCTIgnoreClangWarningEnd //-Warray-bounds-pointer-arithmetic the pointer incremented by %0 refers past the end of the array (that contains %1 element%s2) //-Warray-bounds-pointer-arithmetic the pointer decremented by %0 refers before the beginning of the array #define GCT_IGNORE_WARRAY_BOUNDS_POINTER_ARITHMETIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warray-bounds-pointer-arithmetic) #define GCT_IGNORE_WARRAY_BOUNDS_POINTER_ARITHMETIC_WARNING_END GCTIgnoreClangWarningEnd //-Wassign-enum integer constant not in range of enumerated type %0 #define GCT_IGNORE_WASSIGN_ENUM_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wassign-enum) #define GCT_IGNORE_WASSIGN_ENUM_WARNING_END GCTIgnoreClangWarningEnd //-Watomic-property-with-user-defined-accessor writable atomic property %0 cannot pair a synthesized %select{getter|setter}1 with a user defined %select{getter|setter}2 #define GCT_IGNORE_WATOMIC_PROPERTY_WITH_USER_DEFINED_ACCESSOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Watomic-property-with-user-defined-accessor) #define GCT_IGNORE_WATOMIC_PROPERTY_WITH_USER_DEFINED_ACCESSOR_WARNING_END GCTIgnoreClangWarningEnd //-Wattributes unknown attribute %0 ignored #define GCT_IGNORE_WATTRIBUTES_WARNING_BEGIN 
GCTIgnoreClangWarningBegin(-Wattributes) #define GCT_IGNORE_WATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd //-Wauto-var-id 'auto' deduced as 'id' in declaration of %0 #define GCT_IGNORE_WAUTO_VAR_ID_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wauto-var-id) #define GCT_IGNORE_WAUTO_VAR_ID_WARNING_END GCTIgnoreClangWarningEnd //-Wavailability unknown platform %0 in availability macro //-Wavailability overriding method %select{introduced after|deprecated before|obsoleted before}0 overridden method on %1 (%2 vs. %3) //-Wavailability availability does not match previous declaration //-Wavailability overriding method cannot be unavailable on %0 when its overridden method is available //-Wavailability feature cannot be %select{introduced|deprecated|obsoleted}0 in %1 version %2 before it was %select{introduced|deprecated|obsoleted}3 in version %4 attribute ignored #define GCT_IGNORE_WAVAILABILITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wavailability) #define GCT_IGNORE_WAVAILABILITY_WARNING_END GCTIgnoreClangWarningEnd //-Wbad-function-cast cast from function call of type %0 to non-matching type %1 #define GCT_IGNORE_WBAD_FUNCTION_CAST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbad-function-cast) #define GCT_IGNORE_WBAD_FUNCTION_CAST_WARNING_END GCTIgnoreClangWarningEnd //-Wbitfield-constant-conversion implicit truncation from %2 to bitfield changes value from %0 to %1 #define GCT_IGNORE_WBITFIELD_CONSTANT_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbitfield-constant-conversion) #define GCT_IGNORE_WBITFIELD_CONSTANT_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd //-Wbitwise-op-parentheses '&' within '|' #define GCT_IGNORE_WBITWISE_OP_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbitwise-op-parentheses) #define GCT_IGNORE_WBITWISE_OP_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd //-Wbool-conversion "initialization of pointer of type %0 to null from a constant boolean " "expression #define GCT_IGNORE_WBOOL_CONVERSION_WARNING_BEGIN 
GCTIgnoreClangWarningBegin(-Wbool-conversion) #define GCT_IGNORE_WBOOL_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd //-Wbridge-cast %0 cannot bridge to %1 //-Wbridge-cast %0 bridges to %1, not %2 #define GCT_IGNORE_WBRIDGE_CAST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbridge-cast) #define GCT_IGNORE_WBRIDGE_CAST_WARNING_END GCTIgnoreClangWarningEnd //-Wbuiltin-requires-header declaration of built-in function '%0' requires inclusion of the header stdio.h //-Wbuiltin-requires-header declaration of built-in function '%0' requires inclusion of the header setjmp.h //-Wbuiltin-requires-header declaration of built-in function '%0' requires inclusion of the header ucontext.h #define GCT_IGNORE_WBUILTIN_REQUIRES_HEADER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbuiltin-requires-header) #define GCT_IGNORE_WBUILTIN_REQUIRES_HEADER_WARNING_END GCTIgnoreClangWarningEnd //-Wc++-compat %select{|empty }0%select{struct|union}1 has size 0 in C, %select{size 1|non-zero size}2 in C++ #define GCT_IGNORE_WC_COPMAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++-compat) #define GCT_IGNORE_WC_COPMAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++11-compat explicit instantiation cannot be 'inline' //-Wc++11-compat explicit instantiation of %0 must occur at global scope //-Wc++11-compat explicit instantiation of %0 not in a namespace enclosing %1 //-Wc++11-compat explicit instantiation of %q0 must occur in namespace %1 #define GCT_IGNORE_WC11_COPMAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++11-compat) #define GCT_IGNORE_WC11_COPMAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++11-narrowing constant expression evaluates to %0 which cannot be narrowed to type %1 in C++11 //-Wc++11-narrowing type %0 cannot be narrowed to %1 in initializer list in C++11 //-Wc++11-narrowing non-constant-expression cannot be narrowed from type %0 to %1 in initializer list in C++11 #define GCT_IGNORE_WC11_NARROWING_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++11-narrowing) #define 
GCT_IGNORE_WC11_NARROWING_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-c++11-compat type definition in a constexpr %select{function|constructor}0 is incompatible with C++ standards before C++1y //-Wc++98-c++11-compat use of this statement in a constexpr %select{function|constructor}0 is incompatible with C++ standards before C++1y //-Wc++98-c++11-compat init-captures.def warn_cxx11_compat_init_capture : Warning "initialized lambda captures are incompatible with C++ standards " "before C++1y //-Wc++98-c++11-compat variable declaration in a constexpr %select{function|constructor}0 is incompatible with C++ standards before C++1y //-Wc++98-c++11-compat constexpr function with no return statements is incompatible with C++ standards before C++1y //-Wc++98-c++11-compat multiple return statements in constexpr function is incompatible with C++ standards before C++1y //-Wc++98-c++11-compat variable templates are incompatible with C++ standards before C++1y #define GCT_IGNORE_WC98_C11_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-c++11-compat) #define GCT_IGNORE_WC98_C11_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-compat substitution failure due to access control is incompatible with C++98 //-Wc++98-compat %select{anonymous struct|union}0 member %1 with a non-trivial %select{constructor|copy constructor|move constructor|copy assignment operator|move assignment operator|destructor}2 is incompatible with C++98 //-Wc++98-compat enumeration type in nested name specifier is incompatible with C++98 //-Wc++98-compat static data member %0 in union is incompatible with C++98 //-Wc++98-compat default template arguments for a function template are incompatible with C++98 //-Wc++98-compat scalar initialized from empty initializer list is incompatible with C++98 //-Wc++98-compat befriending %1 without '%select{struct|interface|union|class|enum}0' keyword is incompatible with C++98 //-Wc++98-compat use of null pointer as non-type template argument is incompatible with 
C++98 //-Wc++98-compat friend declaration naming a member of the declaring class is incompatible with C++98 //-Wc++98-compat non-class friend type %0 is incompatible with C++98 //-Wc++98-compat befriending enumeration type %0 is incompatible with C++98 //-Wc++98-compat use of non-static data member %0 in an unevaluated context is incompatible with C++98 //-Wc++98-compat friend function %0 would be implicitly redefined in C++98 //-Wc++98-compat %select{class template|class template partial|variable template|variable template partial|function template|member function|static data member|member class|member enumeration}0 specialization of %1 outside namespace %2 is incompatible with C++98 //-Wc++98-compat reference initialized from initializer list is incompatible with C++98 //-Wc++98-compat redundant parentheses surrounding address non-type template argument are incompatible with C++98 //-Wc++98-compat initialization of initializer_list object is incompatible with C++98 //-Wc++98-compat use of 'template' keyword outside of a template is incompatible with C++98 //-Wc++98-compat non-type template argument referring to %select{function|object}0 %1 with internal linkage is incompatible with C++98 //-Wc++98-compat use of 'typename' outside of a template is incompatible with C++98 //-Wc++98-compat passing object of trivial but non-POD type %0 through variadic %select{function|block|method|constructor}1 is incompatible with C++98 //-Wc++98-compat goto would jump into protected scope in C++98 //-Wc++98-compat constructor call from initializer list is incompatible with C++98 //-Wc++98-compat 'auto' type specifier is incompatible with C++98 //-Wc++98-compat delegating constructors are incompatible with C++98 //-Wc++98-compat 'constexpr' specifier is incompatible with C++98 //-Wc++98-compat inheriting constructors are incompatible with C++98 //-Wc++98-compat explicit conversion functions are incompatible with C++98 //-Wc++98-compat switch case would be in a protected scope in 
C++98 //-Wc++98-compat '%0' type specifier is incompatible with C++98 //-Wc++98-compat indirect goto might cross protected scopes in C++98 #define GCT_IGNORE_WC98_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat) #define GCT_IGNORE_WC98_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-compat-pedantic cast between pointer-to-function and pointer-to-object is incompatible with C++98 //-Wc++98-compat-pedantic implicit conversion from array size expression of type %0 to %select{integral|enumeration}1 type %2 is incompatible with C++98 #define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat-pedantic) #define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_END GCTIgnoreClangWarningEnd //-Wcast-align cast from %0 to %1 increases required alignment from %2 to %3 #define GCT_IGNORE_WCAST_ALOGN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wcast-align) #define GCT_IGNORE_WCAST_ALOGN_WARNING_END GCTIgnoreClangWarningEnd //-Wcast-of-sel-type cast of type %0 to %1 is deprecated use sel_getName instead #define GCT_IGNORE_WCAST_OF_SEL_TYPE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wcast-of-sel-type) #define GCT_IGNORE_WCAST_OF_SEL_TYPE_WARNING_END GCTIgnoreClangWarningEnd //-Wchar-subscripts array subscript is of type 'char' #define GCT_IGNORE_WCHAR_SUBSCRIPTS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wchar-subscripts) #define GCT_IGNORE_WCHAR_SUBSCRIPTS_WARNING_END GCTIgnoreClangWarningEnd //-Wconditional-uninitialized variable %0 may be uninitialized when %select{used here|captured by block}1 #define GCT_IGNORE_WCONDITIONAL_UNINITIALIZED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wconditional-uninitialized) #define GCT_IGNORE_WCONDITIONAL_UNINITIALIZED_WARNING_END GCTIgnoreClangWarningEnd //-Wconstant-logical-operand use of logical '%0' with constant operand #define GCT_IGNORE_WCONSTANT_LOGICAL_OPERAND_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wconstant-logical-operand) #define GCT_IGNORE_WCONSTANT_LOGICAL_OPERAND_WARNING_END 
GCTIgnoreClangWarningEnd //-Wconstexpr-not-const 'constexpr' non-static member function will not be implicitly 'const' in C++1y add 'const' to avoid a change in behavior #define GCT_IGNORE_WCONSTANT_NOT_CONST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wconstexpr-not-const) #define GCT_IGNORE_WCONSTANT_NOT_CONST_WARNING_END GCTIgnoreClangWarningEnd //-Wconsumed state of variable '%0' must match at the entry and exit of loop //-Wconsumed parameter '%0' not in expected state when the function returns: expected '%1', observed '%2' //-Wconsumed argument not in expected state expected '%0', observed '%1' //-Wconsumed invalid invocation of method '%0' on a temporary object while it is in the '%1' state //-Wconsumed return state set for an unconsumable type '%0' //-Wconsumed consumed analysis attribute is attached to member of class '%0' which isn't marked as consumable //-Wconsumed invalid invocation of method '%0' on object '%1' while it is in the '%2' state //-Wconsumed return value not in expected state expected '%0', observed '%1' #define GCT_IGNORE_WCONSUMED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wconsumed) #define GCT_IGNORE_WCONSUMED_WARNING_END GCTIgnoreClangWarningEnd //-Wconversion implicit conversion discards imaginary component: %0 to %1 //-Wconversion non-type template argument with value '%0' converted to '%1' for unsigned template parameter of type %2 //-Wconversion implicit conversion loses floating-point precision: %0 to %1 //-Wconversion implicit conversion loses integer precision: %0 to %1 //-Wconversion non-type template argument value '%0' truncated to '%1' for template parameter of type %2 //-Wconversion implicit conversion turns vector to scalar: %0 to %1 //-Wconversion implicit conversion turns floating-point number into integer: %0 to %1 #define GCT_IGNORE_WCONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wconversion) #define GCT_IGNORE_WCONVERSION_WARNING_END GCTIgnoreClangWarningEnd //-Wcovered-switch-default default label in switch which 
covers all enumeration values #define GCT_IGNORE_WCOVERED_SWITCH_DEFAULT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wcovered-switch-default) #define GCT_IGNORE_WCOVERED_SWITCH_DEFAULT_WARNING_END GCTIgnoreClangWarningEnd //-Wcustom-atomic-properties atomic by default property %0 has a user defined %select{getter|setter}1 (property should be marked 'atomic' if this is intended) #define GCT_IGNORE_WCUSTOM_ATOMIC_PROPERTIES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wcustom-atomic-properties) #define GCT_IGNORE_WCUSTOM_ATOMIC_PROPERTIES_WARNING_END GCTIgnoreClangWarningEnd //-Wdangling-field initializing pointer member %0 with the stack address of parameter %1 //-Wdangling-field binding reference %select{|subobject of }1member %0 to a temporary value //-Wdangling-field binding reference member %0 to stack allocated parameter %1 #define GCT_IGNORE_WDANGLING_FIELD_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdangling-field) #define GCT_IGNORE_WDANGLING_FIELD_WARNING_END GCTIgnoreClangWarningEnd //-Wdangling-initializer-list array backing the initializer list will be destroyed at the end of %select{the full-expression|the constructor}0 #define GCT_IGNORE_WDANGLING_INITIALIZER_LIST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdangling-initializer-list) #define GCT_IGNORE_WDANGLING_INITIALIZER_LIST_WARNING_END GCTIgnoreClangWarningEnd //-Wdelete-incomplete deleting pointer to incomplete type %0 may cause undefined behavior #define GCT_IGNORE_WDELETE_INCOMPLETE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdelete-incomplete) #define GCT_IGNORE_WDELETE_INCOMPLETE_WARNING_END GCTIgnoreClangWarningEnd //-Wdelete-non-virtual-dtor delete called on %0 that is abstract but has non-virtual destructor //-Wdelete-non-virtual-dtor delete called on %0 that has virtual functions but non-virtual destructor #define GCT_IGNORE_WDELETE_NON_VIRTUAL_DTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdelete-non-virtual-dtor) #define GCT_IGNORE_WDELETE_NON_VIRTUAL_DTOR_WARNING_END GCTIgnoreClangWarningEnd 
//-Wdeprecated access declarations are deprecated use using declarations instead
//-Wdeprecated definition of implicit copy %select{constructor|assignment operator}1 for %0 is deprecated because it has a user-declared %select{copy %select{assignment operator|constructor}1|destructor}2
//-Wdeprecated dynamic exception specifications are deprecated
#define GCT_IGNORE_WDEPRECATED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated)
#define GCT_IGNORE_WDEPRECATED_WARNING_END GCTIgnoreClangWarningEnd

//-Wdeprecated-increment-bool incrementing expression of type bool is deprecated
#define GCT_IGNORE_WDEPRECATED_INCREMENT_BOOL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-increment-bool)
#define GCT_IGNORE_WDEPRECATED_INCREMENT_BOOL_WARNING_END GCTIgnoreClangWarningEnd

//-Wdeprecated-objc-isa-usage assignment to Objective-C's isa is deprecated in favor of object_setClass()
//-Wdeprecated-objc-isa-usage direct access to Objective-C's isa is deprecated in favor of object_getClass()
#define GCT_IGNORE_WDEPRECATED_OBJC_ISA_USAGE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-objc-isa-usage)
#define GCT_IGNORE_WDEPRECATED_OBJC_ISA_USAGE_WARNING_END GCTIgnoreClangWarningEnd

//-Wdeprecated-objc-pointer-introspection bitmasking for introspection of Objective-C object pointers is strongly discouraged
#define GCT_IGNORE_WDEPRECATED_OBJC_POINTER_INTROSPECTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-objc-pointer-introspection)
#define GCT_IGNORE_WDEPRECATED_OBJC_POINTER_INTROSPECTION_WARNING_END GCTIgnoreClangWarningEnd

//-Wdeprecated-objc-pointer-introspection-performSelector warn_objc_pointer_masking.Text
#define GCT_IGNORE_WDEPRECATED_OBJC_POINTER_INTROSPECTION_PERFORMSELECTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-objc-pointer-introspection-performSelector)
#define GCT_IGNORE_WDEPRECATED_OBJC_POINTER_INTROSPECTION_PERFORMSELECTOR_WARNING_END GCTIgnoreClangWarningEnd

//-Wdeprecated-writable-strings dummy warning to enable -fconst-strings
#define GCT_IGNORE_WDEPRECATED_WRITABLE_STRINGS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-writable-strings)
#define GCT_IGNORE_WDEPRECATED_WRITABLE_STRINGS_WARNING_END GCTIgnoreClangWarningEnd

//-Wdirect-ivar-access instance variable %0 is being directly accessed
#define GCT_IGNORE_WDIRECT_IVAR_ACCESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdirect-ivar-access)
#define GCT_IGNORE_WDIRECT_IVAR_ACCESS_WARNING_END GCTIgnoreClangWarningEnd

//-Wdistributed-object-modifiers conflicting distributed object modifiers on return type in implementation of %0
//-Wdistributed-object-modifiers conflicting distributed object modifiers on parameter type in implementation of %0
#define GCT_IGNORE_WDISTRIBUTED_OBJECT_MODIFIERS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdistributed-object-modifiers)
#define GCT_IGNORE_WDISTRIBUTED_OBJECT_MODIFIERS_WARNING_END GCTIgnoreClangWarningEnd

//-Wdivision-by-zero division by zero is undefined
//-Wdivision-by-zero remainder by zero is undefined
#define GCT_IGNORE_WDIVISION_BY_ZERO_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdivision-by-zero)
#define GCT_IGNORE_WDIVISION_BY_ZERO_WARNING_END GCTIgnoreClangWarningEnd

//-Wdocumentation parameter '%0' not found in the function declaration
//-Wdocumentation not a Doxygen trailing comment
#define GCT_IGNORE_WDOCUMENTATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdocumentation)
#define GCT_IGNORE_WDOCUMENTATION_WARNING_END GCTIgnoreClangWarningEnd

//-Wduplicate-enum element %0 has been implicitly assigned %1 which another element has been assigned
#define GCT_IGNORE_WDUPLICATE_ENUM_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wduplicate-enum)
#define GCT_IGNORE_WDUPLICATE_ENUM_WARNING_END GCTIgnoreClangWarningEnd

//-Wduplicate-method-match multiple declarations of method %0 found and ignored
#define GCT_IGNORE_WDUPLICATE_METHOD_MATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wduplicate-method-match)
#define GCT_IGNORE_WDUPLICATE_METHOD_MATCH_WARNING_END GCTIgnoreClangWarningEnd

//-Wdynamic-class-memaccess %select{destination for|source of|first operand of|second operand of}0 this %1 call is a pointer to dynamic class %2 vtable pointer will be %select{overwritten|copied|moved|compared}3
#define GCT_IGNORE_WDYNAMIC_CLASS_MEMACCESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdynamic-class-memaccess)
#define GCT_IGNORE_WDYNAMIC_CLASS_MEMACCESS_WARNING_END GCTIgnoreClangWarningEnd

//-Wempty-body switch statement has empty body
//-Wempty-body for loop has empty body
//-Wempty-body if statement has empty body
//-Wempty-body range-based for loop has empty body
//-Wempty-body while loop has empty body
#define GCT_IGNORE_WEMPTY_BODY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wempty-body)
#define GCT_IGNORE_WEMPTY_BODY_WARNING_END GCTIgnoreClangWarningEnd

//-Wenum-compare comparison of two values with different enumeration types%diff{ ($ and $)|}0,1
#define GCT_IGNORE_WENUM_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wenum-compare)
#define GCT_IGNORE_WENUM_COMPARE_WARNING_END GCTIgnoreClangWarningEnd

//-Wenum-conversion implicit conversion from enumeration type %0 to different enumeration type %1
#define GCT_IGNORE_WENUM_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wenum-conversion)
#define GCT_IGNORE_WENUM_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd

//-Wexit-time-destructors declaration requires an exit-time destructor
#define GCT_IGNORE_WEXIT_TIME_DESTRUCTORS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wexit-time-destructors)
#define GCT_IGNORE_WEXIT_TIME_DESTRUCTORS_WARNING_END GCTIgnoreClangWarningEnd

//-Wexplicit-ownership-type method parameter of type %0 with no explicit ownership
#define GCT_IGNORE_WEXPLICIT_OWNERSHIP_TYPE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wexplicit-ownership-type)
#define GCT_IGNORE_WEXPLICIT_OWNERSHIP_TYPE_WARNING_END GCTIgnoreClangWarningEnd

//-Wextern-c-compat %select{|empty }0%select{struct|union}1 has size 0 in C, %select{size 1|non-zero size}2 in C++
#define GCT_IGNORE_WEXTERN_C_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wextern-c-compat)
#define GCT_IGNORE_WEXTERN_C_COMPAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wextern-initializer 'extern' variable has an initializer
#define GCT_IGNORE_WEXTERN_INITIALIZER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wextern-initializer)
#define GCT_IGNORE_WEXTERN_INITIALIZER_WARNING_END GCTIgnoreClangWarningEnd

//-Wfloat-equal comparing floating point with == or != is unsafe
#define GCT_IGNORE_WFLOAT_EQUAL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wfloat-equal)
#define GCT_IGNORE_WFLOAT_EQUAL_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat "data argument position '%0' exceeds the number of data arguments (%1)
//-Wformat position arguments in format strings start counting at 1 (not 0)
//-Wformat invalid position specified for %select{field width|field precision}0
//-Wformat cannot mix positional and non-positional arguments in format string
//-Wformat values of type '%0' should not be used as format arguments add an explicit cast to %1 instead
//-Wformat format specifies type %0 but the argument has type %1
//-Wformat zero field width in scanf format string is unused
//-Wformat no closing ']' for '%%[' in scanf format string
//-Wformat format string should not be a wide string
//-Wformat format string contains '\\0' within the string body
//-Wformat '%select{*|.*}0' specified field %select{width|precision}0 is missing a matching 'int' argument
//-Wformat field %select{width|precision}0 should have type %1, but argument has type %2
//-Wformat %select{field width|precision}0 used with '%1' conversion specifier, resulting in undefined behavior
//-Wformat format string missing
//-Wformat incomplete format specifier
//-Wformat flag '%0' results in undefined behavior with '%1' conversion specifier
//-Wformat flag '%0' is ignored when flag '%1' is present
//-Wformat more '%%' conversions than data arguments
//-Wformat length modifier '%0' results in undefined behavior or no effect with
// '%1' conversion specifier
#define GCT_IGNORE_WFORMAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat)
#define GCT_IGNORE_WFORMAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat-extra-args data argument not used by format string
#define GCT_IGNORE_WFORMAT_EXTRA_ARGS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat-extra-args)
#define GCT_IGNORE_WFORMAT_EXTRA_ARGS_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat-invalid-specifier invalid conversion specifier '%0'
#define GCT_IGNORE_WFORMAT_INVALID_SPECIFIER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat-invalid-specifier)
#define GCT_IGNORE_WFORMAT_INVALID_SPECIFIER_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat-nonliteral format string is not a string literal
#define GCT_IGNORE_WFORMAT_NONLITERAL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat-nonliteral)
#define GCT_IGNORE_WFORMAT_NONLITERAL_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat-security format string is not a string literal (potentially insecure)
#define GCT_IGNORE_WFORMAT_SECURITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat-security)
#define GCT_IGNORE_WFORMAT_SECURITY_WARNING_END GCTIgnoreClangWarningEnd

//-Wformat-zero-length format string is empty
#define GCT_IGNORE_WFORMAT_ZERO_LENGTH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wformat-zero-length)
#define GCT_IGNORE_WFORMAT_ZERO_LENGTH_WARNING_END GCTIgnoreClangWarningEnd

//-Wgcc-compat GCC does not allow the 'cleanup' attribute argument to be anything other than a simple identifier
#define GCT_IGNORE_WGCC_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wgcc-compat)
#define GCT_IGNORE_WGCC_COMPAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wglobal-constructors declaration requires a global constructor
//-Wglobal-constructors declaration requires a global destructor
#define GCT_IGNORE_WGLOBAL_CONSTRUCTORS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wglobal-constructors)
#define GCT_IGNORE_WGLOBAL_CONSTRUCTORS_WARNING_END GCTIgnoreClangWarningEnd

//-Wgnu-conditional-omitted-operand use of GNU ?: conditional expression extension, omitting middle operand
#define GCT_IGNORE_WGNU_CONDITIONAL_OMITTED_OPERAND_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wgnu-conditional-omitted-operand)
#define GCT_IGNORE_WGNU_CONDITIONAL_OMITTED_OPERAND_WARNING_END GCTIgnoreClangWarningEnd

//-Wheader-hygiene using namespace directive in global context in header
#define GCT_IGNORE_WHEADER_HYGIENE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wheader-hygiene)
#define GCT_IGNORE_WHEADER_HYGIENE_WARNING_END GCTIgnoreClangWarningEnd

//-Widiomatic-parentheses using the result of an assignment as a condition without parentheses
#define GCT_IGNORE_WIDIOMATIC_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Widiomatic-parentheses)
#define GCT_IGNORE_WIDIOMATIC_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd

//-Wignored-attributes 'malloc' attribute only applies to functions returning a pointer type
//-Wignored-attributes %0 attribute only applies to %select{functions|unions|variables and functions|functions and methods|parameters|functions, methods and blocks|functions, methods, and classes|functions, methods, and parameters|classes|variables|methods|variables, functions and labels|fields and global variables|structs|variables, functions and tag types|thread-local variables|variables and fields|variables, data members and tag types|types and namespaces|Objective-C interfaces}1
//-Wignored-attributes '%0' attribute cannot be specified on a definition
//-Wignored-attributes __weak attribute cannot be specified on an automatic variable when ARC is not enabled
//-Wignored-attributes Objective-C GC does not allow weak variables on the stack
//-Wignored-attributes __weak attribute cannot be specified on a field declaration
//-Wignored-attributes attribute %0 cannot be applied to %select{functions|Objective-C method}1 without return value
//-Wignored-attributes attribute declaration must precede definition
//-Wignored-attributes attribute %0 is ignored, place it after \"%select{class|struct|union|interface|enum}1\" to apply attribute to type declaration
//-Wignored-attributes __declspec attribute %0 is not supported
//-Wignored-attributes attribute %0 ignored, because it cannot be applied to a type
//-Wignored-attributes attribute %0 after definition is ignored
//-Wignored-attributes %0 attribute ignored
//-Wignored-attributes 'sentinel' attribute only supported for variadic %select{functions|blocks}0
//-Wignored-attributes 'sentinel' attribute requires named arguments
//-Wignored-attributes '%0' only applies to %select{function|pointer|Objective-C object or block pointer}1 types type here is %2
//-Wignored-attributes 'nonnull' attribute applied to function with no pointer arguments
//-Wignored-attributes %0 attribute can only be applied to instance variables or properties
//-Wignored-attributes ibaction attribute can only be applied to Objective-C instance methods
//-Wignored-attributes %0 calling convention ignored on variadic function
//-Wignored-attributes %0 only applies to variables with static storage duration and functions
//-Wignored-attributes %0 attribute argument not supported: %1
//-Wignored-attributes #pramga ms_struct can not be used with dynamic classes or structures
//-Wignored-attributes transparent union definition must contain at least one field transparent_union attribute ignored
//-Wignored-attributes first field of a transparent union cannot have %select{floating point|vector}0 type %1 transparent_union attribute ignored
//-Wignored-attributes 'gnu_inline' attribute requires function to be marked 'inline', attribute ignored
//-Wignored-attributes calling convention %0 ignored for this target
//-Wignored-attributes transparent_union attribute can only be applied to a union definition attribute ignored
//-Wignored-attributes %select{alignment|size}0 of field %1 (%2 bits) does not match the %select{alignment|size}0 of the first field in transparent union transparent_union attribute ignored
//-Wignored-attributes attribute %0 is already applied
//-Wignored-attributes %0 attribute ignored for field of type %1
//-Wignored-attributes %0 attribute ignored when parsing type
//-Wignored-attributes %0 attribute only applies to %select{functions|methods|properties}1 that return %select{an Objective-C object|a pointer|a non-retainable pointer}2
//-Wignored-attributes %0 attribute only applies to %select{Objective-C object|pointer}1 parameters
//-Wignored-attributes attribute %0 is already applied with different parameters
//-Wignored-attributes unknown visibility %0
#define GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wignored-attributes)
#define GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd

//-Wignored-qualifiers "'%0' type qualifier%s1 on return type %plural{1:has|:have}1 no effect
//-Wignored-qualifiers ARC %select{unused|__unsafe_unretained|__strong|__weak|__autoreleasing}0 lifetime qualifier on return type is ignored
#define GCT_IGNORE_WIGNORED_QUALIFIERS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wignored-qualifiers)
#define GCT_IGNORE_WIGNORED_QUALIFIERS_WARNING_END GCTIgnoreClangWarningEnd

//-Wimplicit-atomic-properties property is assumed atomic by default
//-Wimplicit-atomic-properties property is assumed atomic when auto-synthesizing the property
#define GCT_IGNORE_WIMPLICIT_ATOMIC_PROPERTIES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wimplicit-atomic-properties)
#define GCT_IGNORE_WIMPLICIT_ATOMIC_PROPERTIES_WARNING_END GCTIgnoreClangWarningEnd

//-Wimplicit-fallthrough fallthrough annotation in unreachable code
//-Wimplicit-fallthrough unannotated fall-through between switch labels
//-Wimplicit-fallthrough fallthrough annotation does not directly precede switch label
#define GCT_IGNORE_WIMPLICIT_FALLTHROUGH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wimplicit-fallthrough)
#define GCT_IGNORE_WIMPLICIT_FALLTHROUGH_WARNING_END GCTIgnoreClangWarningEnd

//-Wimplicit-function-declaration implicit declaration of function %0
//-Wimplicit-function-declaration use of unknown builtin %0
#define GCT_IGNORE_WIMPLICIT_FUNCTION_DECLARATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wimplicit-function-declaration)
#define GCT_IGNORE_WIMPLICIT_FUNCTION_DECLARATION_WARNING_END GCTIgnoreClangWarningEnd

//-Wimplicit-retain-self "block implicitly retains 'self' explicitly mention 'self' to indicate this is intended behavior
#define GCT_IGNORE_WIMPLICIT_RETAIN_SELF_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wimplicit-retain-self)
#define GCT_IGNORE_WIMPLICIT_RETAIN_SELF_WARNING_END GCTIgnoreClangWarningEnd

//-Wincompatible-library-redeclaration incompatible redeclaration of library function %0
#define GCT_IGNORE_WIMPLICIT_LIBRARY_REDECLARATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wincompatible-library-redeclaration)
#define GCT_IGNORE_WIMPLICIT_LIBRARY_REDECLARATION_WARNING_END GCTIgnoreClangWarningEnd

//-Wincomplete-implementation method definition for %0 not found
#define GCT_IGNORE_WINCOMPLETE_IMPLEMENTATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wincomplete-implementation)
#define GCT_IGNORE_WINCOMPLETE_IMPLEMENTATION_WARNING_END GCTIgnoreClangWarningEnd

//-Winherited-variadic-ctor inheriting constructor does not inherit ellipsis
#define GCT_IGNORE_WINHERITED_VARIADIC_CTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Winherited-variadic-ctor)
#define GCT_IGNORE_WINHERITED_VARIADIC_CTOR_WARNING_END GCTIgnoreClangWarningEnd

//-Winitializer-overrides subobject initialization overrides initialization of other fields within its enclosing subobject
//-Winitializer-overrides initializer overrides prior initialization of this subobject
#define GCT_IGNORE_WINITIALIZER_OVERRIDES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Winitializer-overrides)
#define GCT_IGNORE_WINITIALIZER_OVERRIDES_WARNING_END GCTIgnoreClangWarningEnd

//-Wint-to-pointer-cast cast to %1 from smaller integer type %0
#define GCT_IGNORE_WINT_TO_POINTER_CAST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wint-to-pointer-cast)
#define GCT_IGNORE_WINT_TO_POINTER_CAST_WARNING_END GCTIgnoreClangWarningEnd

//-Wint-to-void-pointer-cast cast to %1 from smaller integer type %0
#define GCT_IGNORE_WINT_TO_VOID_POINTER_CAST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wint-to-void-pointer-cast)
#define GCT_IGNORE_WINT_TO_VOID_POINTER_CAST_WARNING_END GCTIgnoreClangWarningEnd

//-Winvalid-iboutlet IBOutletCollection properties should be copy/strong and not assign
//-Winvalid-iboutlet %select{instance variable|property}2 with %0 attribute must be an object type (invalid %1)
#define GCT_IGNORE_WINVALID_IBOUTLET_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Winvalid-iboutlet)
#define GCT_IGNORE_WINVALID_IBOUTLET_WARNING_END GCTIgnoreClangWarningEnd

//-Winvalid-noreturn function %0 declared 'noreturn' should not return
//-Winvalid-noreturn function declared 'noreturn' should not return
#define GCT_IGNORE_WINVALID_NORETURN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Winvalid-noreturn)
#define GCT_IGNORE_WINVALID_NORETURN_WARNING_END GCTIgnoreClangWarningEnd

//-Wlarge-by-value-copy return value of %0 is a large (%1 bytes) pass-by-value object pass it by reference instead ?
//-Wlarge-by-value-copy %0 is a large (%1 bytes) pass-by-value argument pass it by reference instead ?
#define GCT_IGNORE_WLARGE_BY_VALUE_COPY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wlarge-by-value-copy)
#define GCT_IGNORE_WLARGE_BY_VALUE_COPY_WARNING_END GCTIgnoreClangWarningEnd

//-Wliteral-conversion implicit conversion from %0 to %1 changes value from %2 to %3
#define GCT_IGNORE_WLITERAL_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wliteral-conversion)
#define GCT_IGNORE_WLITERAL_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd

//-Wliteral-range magnitude of floating-point constant too large for type %0 maximum is %1
//-Wliteral-range magnitude of floating-point constant too small for type %0 minimum is %1
#define GCT_IGNORE_WLITERAL_RANGE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wliteral-range)
#define GCT_IGNORE_WLITERAL_RANGE_WARNING_END GCTIgnoreClangWarningEnd

//-Wlogical-not-parentheses logical not is only applied to the left hand side of this comparison
#define GCT_IGNORE_WLOGICAL_NOT_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wlogical-not-parentheses)
#define GCT_IGNORE_WLOGICAL_NOT_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd

//-Wlogical-op-parentheses '&&' within '||'
#define GCT_IGNORE_WLOGICAL_OP_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wlogical-op-parentheses)
#define GCT_IGNORE_WLOGICAL_OP_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd

//-Wloop-analysis variable%select{s| %1|s %1 and %2|s %1, %2, and %3|s %1, %2, %3, and %4}0 used in loop condition not modified in loop body
//-Wloop-analysis variable %0 is %select{decremented|incremented}1 both in the loop header and in the loop body
#define GCT_IGNORE_WLOOP_ANALYSIS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wloop-analysis)
#define GCT_IGNORE_WLOOP_ANALYSIS_WARNING_END GCTIgnoreClangWarningEnd

//-Wmethod-signatures conflicting parameter types in implementation of %0: %1 vs %2
//-Wmethod-signatures conflicting return type in implementation of %0: %1 vs %2
#define GCT_IGNORE_WMETHOD_SIGNATURES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmethod-signatures)
#define GCT_IGNORE_WMETHOD_SIGNATURES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmicrosoft extra qualification on member %0
#define GCT_IGNORE_WMICROSOFT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmicrosoft)
#define GCT_IGNORE_WMICROSOFT_WARNING_END GCTIgnoreClangWarningEnd

//-Wmismatched-method-attributes attributes on method implementation and its declaration must match
#define GCT_IGNORE_WMISMATCHED_METHOD_ATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmismatched-method-attributes)
#define GCT_IGNORE_WMISMATCHED_METHOD_ATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmismatched-parameter-types conflicting parameter types in implementation of %0%diff{: $ vs $|}1,2
#define GCT_IGNORE_WMISMATCHED_PARAMETER_TYPES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmismatched-parameter-types)
#define GCT_IGNORE_WMISMATCHED_PARAMETER_TYPES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmismatched-return-types conflicting return type in implementation of %0%diff{: $ vs $|}1,2
#define GCT_IGNORE_WMISMATCHED_RETURN_TYPES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmismatched-return-types)
#define GCT_IGNORE_WMISMATCHED_RETURN_TYPES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-braces suggest braces around initialization of subobject
#define GCT_IGNORE_WMISSING_BRACES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-braces)
#define GCT_IGNORE_WMISSING_BRACES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-declarations '%0' ignored on this declaration
#define GCT_IGNORE_WMISSING_DECLARATIONS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-declarations)
#define GCT_IGNORE_WMISSING_DECLARATIONS_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-field-initializers missing field '%0' initializer
#define GCT_IGNORE_WMISSING_FIELD_INITIALIZERS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-field-initializers)
#define GCT_IGNORE_WMISSING_FIELD_INITIALIZERS_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-method-return-type method has no return type specified defaults to 'id'
#define GCT_IGNORE_WMISSING_METHOD_RETURN_TYPE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-method-return-type)
#define GCT_IGNORE_WMISSING_METHOD_RETURN_TYPE_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-noreturn %select{function|method}0 %1 could be declared with attribute 'noreturn'
//-Wmissing-noreturn block could be declared with attribute 'noreturn'
#define GCT_IGNORE_WMISSING_NORETURN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-noreturn)
#define GCT_IGNORE_WMISSING_NORETURN_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-prototypes no previous prototype for function %0
#define GCT_IGNORE_WMISSING_PROTOTYPES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-prototypes)
#define GCT_IGNORE_WMISSING_PROTOTYPES_WARNING_END GCTIgnoreClangWarningEnd

//-Wmissing-variable-declarations no previous extern declaration for non-static variable %0
#define GCT_IGNORE_WMISSING_VARIABLE_DECLARATIONS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-variable-declarations)
#define GCT_IGNORE_WMISSING_VARIABLE_DECLARATIONS_WARNING_END GCTIgnoreClangWarningEnd

//-Wmultiple-move-vbase defaulted move assignment operator of %0 will move assign virtual base class %1 multiple times
#define GCT_IGNORE_WMULTIPLE_MOVE_VBASE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmultiple-move-vbase)
#define GCT_IGNORE_WMULTIPLE_MOVE_VBASE_WARNING_END GCTIgnoreClangWarningEnd

//-Wnested-anon-types anonymous types declared in an anonymous union/struct are an extension
#define GCT_IGNORE_WNESTED_ANON_TYPES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnested-anon-types)
#define GCT_IGNORE_WNESTED_ANON_TYPES_WARNING_END GCTIgnoreClangWarningEnd

//-Wno-typedef-redefinition Redefinition of typedef '%0' is a C11 feature
#define GCT_IGNORE_WNO_TYPEDEF_REDEFINITION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wno-typedef-redefinition)
#define GCT_IGNORE_WNO_TYPEDEF_REDEFINITION_WARNING_END GCTIgnoreClangWarningEnd

//-Wnon-literal-null-conversion "expression which evaluates to zero treated as a null pointer constant of " "type %0
#define GCT_IGNORE_WNON_LITERAL_NULL_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnon-literal-null-conversion)
#define GCT_IGNORE_WNON_LITERAL_NULL_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd

//-Wnon-pod-varargs second argument to 'va_arg' is of ARC ownership-qualified type %0
//-Wnon-pod-varargs cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic %select{function|block|method|constructor}2 expected type from format string was %3
//-Wnon-pod-varargs second argument to 'va_arg' is of non-POD type %0
//-Wnon-pod-varargs cannot pass object of %select{non-POD|non-trivial}0 type %1 through variadic %select{function|block|method|constructor}2 call will abort at runtime
#define GCT_IGNORE_WNON_POD_VARARGS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnon-pod-varargs)
#define GCT_IGNORE_WNON_POD_VARARGS_WARNING_END GCTIgnoreClangWarningEnd

//-Wnon-virtual-dtor %0 has virtual functions but non-virtual destructor
#define GCT_IGNORE_WNON_VIRTUAL_DTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnon-virtual-dtor)
#define GCT_IGNORE_WNON_VIRTUAL_DTOR_WARNING_END GCTIgnoreClangWarningEnd

//-Wnonnull null passed to a callee which requires a non-null argument
#define GCT_IGNORE_WNONNULL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnonnull)
#define GCT_IGNORE_WNONNULL_WARNING_END GCTIgnoreClangWarningEnd

//-Wnull-arithmetic use of NULL in arithmetic operation
//-Wnull-arithmetic comparison between NULL and non-pointer %select{(%1 and NULL)|(NULL and %1)}0
#define GCT_IGNORE_WNULL_ARITHMETIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnull-arithmetic)
#define GCT_IGNORE_WNULL_ARITHMETIC_WARNING_END GCTIgnoreClangWarningEnd

//-Wnull-dereference indirection of non-volatile null pointer will be deleted, not trap
#define GCT_IGNORE_WNULL_DEREFERENCE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnull-dereference)
#define GCT_IGNORE_WNULL_DEREFERENCE_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-autosynthesis-property-ivar-name-match autosynthesized property %0 will use %select{|synthesized}1 instance variable %2, not existing instance variable %3
#define GCT_IGNORE_WOBJC_AUTOSYNTHESIS_PROPERTY_IVAR_NAME_MATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-autosynthesis-property-ivar-name-match)
#define GCT_IGNORE_WOBJC_AUTOSYNTHESIS_PROPERTY_IVAR_NAME_MATCH_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-forward-class-redefinition redefinition of forward class %0 of a typedef name of an object type is ignored
#define GCT_IGNORE_WOBJC_FORWARD_CLASS_REDEFINITION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-forward-class-redefinition)
#define GCT_IGNORE_WOBJC_FORWARD_CLASS_REDEFINITION_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-interface-ivars declaration of instance variables in the interface is deprecated
#define GCT_IGNORE_WOBJC_INTERFACE_IVARS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-interface-ivars)
#define GCT_IGNORE_WOBJC_INTERFACE_IVARS_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-literal-compare direct comparison of %select{an array literal|a dictionary literal|a numeric literal|a boxed expression|}0 has undefined behavior
#define GCT_IGNORE_WOBJC_LITERAL_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-literal-compare)
#define GCT_IGNORE_WOBJC_LITERAL_COMPARE_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-literal-missing-atsign string literal must be prefixed by '@'
#define GCT_IGNORE_WOBJC_LITERAL_MISSING_ATSIGN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-literal-missing-atsign)
#define GCT_IGNORE_WOBJC_LITERAL_MISSING_ATSIGN_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-method-access instance method %objcinstance0 not found (return type defaults to 'id') did you mean %objcinstance2?
//-Wobjc-method-access class method %objcclass0 not found (return type defaults to 'id') did you mean %objcclass2?
//-Wobjc-method-access instance method %objcinstance0 not found (return type defaults to 'id')
//-Wobjc-method-access instance method %0 is being used on 'Class' which is not in the root class
//-Wobjc-method-access class method %objcclass0 not found (return type defaults to 'id')
//-Wobjc-method-access instance method %0 found instead of class method %1
#define GCT_IGNORE_WOBJC_METHOD_ACCESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-method-access)
#define GCT_IGNORE_WOBJC_METHOD_ACCESS_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-missing-property-synthesis "auto property synthesis is synthesizing property not explicitly synthesized
#define GCT_IGNORE_WOBJC_MISSING_PROPERTY_SYNTHESIS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-missing-property-synthesis)
#define GCT_IGNORE_WOBJC_MISSING_PROPERTY_SYNTHESIS_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-missing-super-calls method possibly missing a [super %0] call
#define GCT_IGNORE_WOBJC_MISSING_SUPER_CALLS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-missing-super-calls)
#define GCT_IGNORE_WOBJC_MISSING_SUPER_CALLS_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-noncopy-retain-block-property "retain'ed block property does not copy the block " "- use copy attribute instead
#define GCT_IGNORE_WOBJC_NONCOPY_RETAIN_BLOCK_PROPERTY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-noncopy-retain-block-property)
#define GCT_IGNORE_WOBJC_NONCOPY_RETAIN_BLOCK_PROPERTY_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-nonunified-exceptions can not catch an exception thrown with @throw in C++ in the non-unified exception model
#define GCT_IGNORE_WOBJC_NONUNIFIED_EXCEPTIONS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-nonunified-exceptions)
#define GCT_IGNORE_WOBJC_NONUNIFIED_EXCEPTIONS_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-property-implementation property %0 requires method %1 to be defined - use @dynamic or provide a method implementation in this category
//-Wobjc-property-implementation property %0 requires method %1 to be defined - use @synthesize, @dynamic or provide a method implementation in this class implementation
#define GCT_IGNORE_WOBJC_PROPERTY_IMPLEMENTATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-property-implementation)
#define GCT_IGNORE_WOBJC_PROPERTY_IMPLEMENTATION_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-property-implicit-mismatch "primary property declaration is implicitly strong while redeclaration in class extension is weak
#define GCT_IGNORE_WOBJC_PROPERTY_IMPLICIT_MISMATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-property-implicit-mismatch)
#define GCT_IGNORE_WOBJC_PROPERTY_IMPLICIT_MISMATCH_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-property-matches-cocoa-ownership-rule property's synthesized getter follows Cocoa naming convention for returning 'owned' objects
#define GCT_IGNORE_WOBJC_PROPERTY_MATCHES_COCOA_OWNERSHIP_RULE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-property-matches-cocoa-ownership-rule)
#define GCT_IGNORE_WOBJC_PROPERTY_MATCHES_COCOA_OWNERSHIP_RULE_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-property-no-attribute no 'assign', 'retain', or 'copy' attribute is specified - 'assign' is assumed
//-Wobjc-property-no-attribute default property attribute 'assign' not appropriate for non-GC object
#define GCT_IGNORE_WOBJC_PROPERTY_NO_ATTRIBUTE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-property-no-attribute)
#define GCT_IGNORE_WOBJC_PROPERTY_NO_ATTRIBUTE_WARNING_END GCTIgnoreClangWarningEnd

//-Wobjc-property-synthesis auto property synthesis will not synthesize property '%0' because it is 'readwrite' but it will be synthesized 'readonly' via another property
//-Wobjc-property-synthesis "auto property synthesis will not synthesize property '%0' because it cannot share an ivar with another synthesized property
#define GCT_IGNORE_WOBJC_PROPERTY_SYNTHESIS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-property-synthesis)
#define GCT_IGNORE_WOBJC_PROPERTY_SYNTHESIS_WARNING_END GCTIgnoreClangWarningEnd
// Each *_BEGIN / *_END pair suppresses the Clang diagnostic named in the
// comment(s) above it, via the project-defined GCTIgnoreClangWarningBegin/End.
//-Wobjc-protocol-method-implementation category is implementing a method which will also be implemented by its primary class
#define GCT_IGNORE_WOBJC_PROTOCOL_METHOD_IMPLEMENTATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-protocol-method-implementation)
#define GCT_IGNORE_WOBJC_PROTOCOL_METHOD_IMPLEMENTATION_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-protocol-property-synthesis auto property synthesis will not synthesize property declared in a protocol
#define GCT_IGNORE_WOBJC_PROTOCOL_PROPERTY_SYNTHESIS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-protocol-property-synthesis)
#define GCT_IGNORE_WOBJC_PROTOCOL_PROPERTY_SYNTHESIS_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-redundant-literal-use using %0 with a literal is redundant
#define GCT_IGNORE_WOBJC_REDUNDANT_LITERAL_USE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-redundant-literal-use)
#define GCT_IGNORE_WOBJC_REDUNDANT_LITERAL_USE_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-root-class class %0 defined without specifying a base class
#define GCT_IGNORE_WOBJC_ROOT_CLASS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-root-class)
#define GCT_IGNORE_WOBJC_ROOT_CLASS_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-string-compare direct comparison of a string literal has undefined behavior
#define GCT_IGNORE_WOBJC_STRING_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-string-compare)
#define GCT_IGNORE_WOBJC_STRING_COMPARE_WARNING_END GCTIgnoreClangWarningEnd
//-Wobjc-string-concatenation concatenated NSString literal for an NSArray expression - possibly missing a comma
#define GCT_IGNORE_WOBJC_STRING_CONCATENATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wobjc-string-concatenation)
#define GCT_IGNORE_WOBJC_STRING_CONCATENATION_WARNING_END GCTIgnoreClangWarningEnd
//-Wover-aligned type %0 requires %1 bytes of alignment and the default allocator only guarantees %2 bytes
#define GCT_IGNORE_WOVER_ALIGNED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wover-aligned)
#define GCT_IGNORE_WOVER_ALIGNED_WARNING_END GCTIgnoreClangWarningEnd
//-Woverloaded-shift-op-parentheses overloaded operator %select{|}0 has lower precedence than comparison operator
#define GCT_IGNORE_WOVERLOADED_SHIFT_OP_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Woverloaded-shift-op-parentheses)
#define GCT_IGNORE_WOVERLOADED_SHIFT_OP_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd
//-Woverloaded-virtual %q0 hides overloaded virtual %select{function|functions}1
#define GCT_IGNORE_WOVERLOADED_VIRTUAL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Woverloaded-virtual)
#define GCT_IGNORE_WOVERLOADED_VIRTUAL_WARNING_END GCTIgnoreClangWarningEnd
//-Woverriding-method-mismatch conflicting distributed object modifiers on parameter type in declaration of %0
//-Woverriding-method-mismatch conflicting parameter types in declaration of %0: %1 vs %2
//-Woverriding-method-mismatch conflicting variadic declaration of method and its implementation
//-Woverriding-method-mismatch conflicting distributed object modifiers on return type in declaration of %0
//-Woverriding-method-mismatch conflicting parameter types in declaration of %0%diff{: $ vs $|}1,2
//-Woverriding-method-mismatch conflicting return type in declaration of %0%diff{: $ vs $|}1,2
//-Woverriding-method-mismatch conflicting return type in declaration of %0: %1 vs %2
#define GCT_IGNORE_WOVERRIDING_METHOD_MISMATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Woverriding-method-mismatch)
#define GCT_IGNORE_WOVERRIDING_METHOD_MISMATCH_WARNING_END GCTIgnoreClangWarningEnd
//-Wpacked packed attribute is unnecessary for %0
//-Wpadded padding %select{struct|interface|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 to align anonymous bit-field
//-Wpadded padding %select{struct|interface|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 to align %5
//-Wpadded padding size of %0 with %1 %select{byte|bit}2%select{|s}3 to alignment boundary
// NOTE(review): macro is named WPACKED but suppresses -Wpadded — looks
// intentional per the comments above, but confirm against callers.
#define GCT_IGNORE_WPACKED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wpadded)
#define GCT_IGNORE_WPACKED_WARNING_END GCTIgnoreClangWarningEnd
//-Wparentheses using the result of an assignment as a condition without parentheses
//-Wparentheses %0 has lower precedence than %1 %1 will be evaluated first
//-Wparentheses operator '?:' has lower precedence than '%0' '%0' will be evaluated first
#define GCT_IGNORE_WPARENTHESE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wparentheses)
#define GCT_IGNORE_WPARENTHESE_WARNING_END GCTIgnoreClangWarningEnd
//-Wparentheses-equality equality comparison with extraneous parentheses
#define GCT_IGNORE_WPARENTHESE_EQUALITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wparentheses-equality)
#define GCT_IGNORE_WPARENTHESE_EQUALITY_WARNING_END GCTIgnoreClangWarningEnd
//-Wpointer-arith subtraction of pointers to type %0 of zero size has undefined behavior
#define GCT_IGNORE_WPOINTER_ARITH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wpointer-arith)
#define GCT_IGNORE_WPOINTER_ARITH_WARNING_END GCTIgnoreClangWarningEnd
//-Wpredefined-identifier-outside-function predefined identifier is only valid inside function
#define GCT_IGNORE_WPREDEFINED_IDENTIFIER_OUTSIDE_FUNCTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wpredefined-identifier-outside-function)
#define GCT_IGNORE_WPREDEFINED_IDENTIFIER_OUTSIDE_FUNCTION_WARNING_END GCTIgnoreClangWarningEnd
//-Wprivate-extern use of __private_extern__ on a declaration may not produce external symbol private to the linkage unit and is deprecated
#define GCT_IGNORE_WPRIVATE_EXTERN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wprivate-extern)
#define GCT_IGNORE_WPRIVATE_EXTERN_WARNING_END GCTIgnoreClangWarningEnd
//-Wprotocol method %0 in protocol not implemented
#define GCT_IGNORE_WPROTOCOL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wprotocol)
#define GCT_IGNORE_WPROTOCOL_WARNING_END GCTIgnoreClangWarningEnd
//-Wprotocol-property-synthesis-ambiguity property of type %0 was selected for synthesis
#define GCT_IGNORE_WPROTOCOL_PROPERTY_SYNTHESIS_AMBIGUITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wprotocol-property-synthesis-ambiguity)
#define GCT_IGNORE_WPROTOCOL_PROPERTY_SYNTHESIS_AMBIGUITY_WARNING_END GCTIgnoreClangWarningEnd
//-Wreadonly-iboutlet-property readonly IBOutlet property '%0' when auto-synthesized may not work correctly with 'nib' loader
#define GCT_IGNORE_WREADONLY_IBOUTLET_PROPERTY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreadonly-iboutlet-property)
#define GCT_IGNORE_WREADONLY_IBOUTLET_PROPERTY_WARNING_END GCTIgnoreClangWarningEnd
//-Wreadonly-setter-attrs property attributes '%0' and '%1' are mutually exclusive
#define GCT_IGNORE_WREADONLY_SETTER_ATTRS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreadonly-setter-attrs)
#define GCT_IGNORE_WREADONLY_SETTER_ATTRS_WARNING_END GCTIgnoreClangWarningEnd
//-Wreceiver-expr receiver type %0 is not 'id' or interface pointer, consider casting it to 'id'
#define GCT_IGNORE_WRECEIVER_EXPR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreceiver-expr)
#define GCT_IGNORE_WRECEIVER_EXPR_WARNING_END GCTIgnoreClangWarningEnd
//-Wreceiver-forward-class receiver type %0 for instance message is a forward declaration
#define GCT_IGNORE_WRECEIVER_FORWARD_CLASS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreceiver-forward-class)
#define GCT_IGNORE_WRECEIVER_FORWARD_CLASS_WARNING_END GCTIgnoreClangWarningEnd
//-Wreceiver-is-weak "weak %select{receiver|property|implicit property}0 may be unpredictably set to nil
#define GCT_IGNORE_WRECEIVER_IS_WEAK_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreceiver-is-weak)
#define GCT_IGNORE_WRECEIVER_IS_WEAK_WARNING_END GCTIgnoreClangWarningEnd
//-Wreinterpret-base-class 'reinterpret_cast' %select{from|to}3 class %0 %select{to|from}3 its %select{virtual base|base at non-zero offset}2 %1 behaves differently from 'static_cast'
#define GCT_IGNORE_WREINTERPRET_BASE_CLASS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreinterpret-base-class)
#define GCT_IGNORE_WREINTERPRET_BASE_CLASS_WARNING_END GCTIgnoreClangWarningEnd
//-Wreorder %select{field|base class}0 %1 will be initialized after %select{field|base}2 %3
#define GCT_IGNORE_WREORDER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreorder)
#define GCT_IGNORE_WREORDER_WARNING_END GCTIgnoreClangWarningEnd
//-Wrequires-super-attribute %0 attribute cannot be applied to %select{methods in protocols|dealloc}1
#define GCT_IGNORE_WREQUIRES_SUPER_ATTRIBUTE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wrequires-super-attribute)
#define GCT_IGNORE_WREQUIRES_SUPER_ATTRIBUTE_WARNING_END GCTIgnoreClangWarningEnd
//-Wreturn-stack-address returning address of local temporary object
//-Wreturn-stack-address returning address of label, which is local
//-Wreturn-stack-address address of stack memory associated with local variable %0 returned
//-Wreturn-stack-address reference to stack memory associated with local variable %0 returned
//-Wreturn-stack-address returning reference to local temporary object
#define GCT_IGNORE_WRETURN_STACK_ADDRESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreturn-stack-address)
#define GCT_IGNORE_WRETURN_STACK_ADDRESS_WARNING_END GCTIgnoreClangWarningEnd
//-Wreturn-type control may reach end of non-void function
//-Wreturn-type non-void %select{function|method}1 %0 should return a value, DefaultError
//-Wreturn-type control reaches end of non-void function
#define GCT_IGNORE_WRETURN_TYPE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreturn-type)
#define GCT_IGNORE_WRETURN_TYPE_WARNING_END GCTIgnoreClangWarningEnd
//-Wreturn-type-c-linkage %0 has C-linkage specified, but returns incomplete type %1 which could be incompatible with C
//-Wreturn-type-c-linkage %0 has C-linkage specified, but returns user-defined type %1 which is incompatible with C
#define GCT_IGNORE_WRETURN_TYPE_C_LINKAGE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wreturn-type-c-linkage)
#define GCT_IGNORE_WRETURN_TYPE_C_LINKAGE_WARNING_END GCTIgnoreClangWarningEnd
//-Wsection section does not match previous declaration
#define GCT_IGNORE_WSECTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsection)
#define GCT_IGNORE_WSECTION_WARNING_END GCTIgnoreClangWarningEnd
//-Wselector creating selector for nonexistent method %0
#define GCT_IGNORE_WSELECTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wselector)
#define GCT_IGNORE_WSELECTOR_WARNING_END GCTIgnoreClangWarningEnd
//-Wselector-type-mismatch multiple selectors named %0 found
#define GCT_IGNORE_WSELECTOR_TYPE_MISMATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wselector-type-mismatch)
#define GCT_IGNORE_WSELECTOR_TYPE_MISMATCH_WARNING_END GCTIgnoreClangWarningEnd
//-Wself-assign explicitly assigning a variable of type %0 to itself
#define GCT_IGNORE_WSELF_ASSIGN_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wself-assign)
#define GCT_IGNORE_WSELF_ASSIGN_WARNING_END GCTIgnoreClangWarningEnd
//-Wself-assign-field assigning %select{field|instance variable}0 to itself
#define GCT_IGNORE_WSELF_ASSIGN_FIELD_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wself-assign-field)
#define GCT_IGNORE_WSELF_ASSIGN_FIELD_WARNING_END GCTIgnoreClangWarningEnd
//-Wsentinel "missing sentinel in %select{function call|method dispatch|block call}0
//-Wsentinel not enough variable arguments in %0 declaration to fit a sentinel
#define GCT_IGNORE_WSENTINEL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsentinel)
#define GCT_IGNORE_WSENTINEL_WARNING_END GCTIgnoreClangWarningEnd
//-Wshadow declaration shadows a %select{" "local variable|" "variable in %2|" "static data member of %2|" "field of %2}1
#define GCT_IGNORE_WSHADOW_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshadow)
#define GCT_IGNORE_WSHADOW_WARNING_END GCTIgnoreClangWarningEnd
//-Wshadow-ivar local declaration of %0 hides instance variable
#define GCT_IGNORE_WSHADOW_IVAR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshadow-ivar)
#define GCT_IGNORE_WSHADOW_IVAR_WARNING_END GCTIgnoreClangWarningEnd
//-Wshift-count-negative shift count is negative
#define GCT_IGNORE_WSHIFT_COUNT_NEGATIVE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshift-count-negative)
#define GCT_IGNORE_WSHIFT_COUNT_NEGATIVE_WARNING_END GCTIgnoreClangWarningEnd
//-Wshift-count-overflow shift count = width of type
#define GCT_IGNORE_WSHIFT_COUNT_OVERFLOW_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshift-count-overflow)
#define GCT_IGNORE_WSHIFT_COUNT_OVERFLOW_WARNING_END GCTIgnoreClangWarningEnd
//-Wshift-op-parentheses operator '%0' has lower precedence than '%1' '%1' will be evaluated first
#define GCT_IGNORE_WSHIFT_OP_PARENTHESES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshift-op-parentheses)
#define GCT_IGNORE_WSHIFT_OP_PARENTHESES_WARNING_END GCTIgnoreClangWarningEnd
//-Wshift-overflow signed shift result (%0) requires %1 bits to represent, but %2 only has %3 bits
#define GCT_IGNORE_WSHIFT_OVERFLOW_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshift-overflow)
#define GCT_IGNORE_WSHIFT_OVERFLOW_WARNING_END GCTIgnoreClangWarningEnd
//-Wshift-sign-overflow signed shift result (%0) sets the sign bit of the shift expression's type (%1) and becomes negative
#define GCT_IGNORE_WSHIFT_SIGN_OVERFLOW_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshift-sign-overflow)
#define GCT_IGNORE_WSHIFT_SIGN_OVERFLOW_WARNING_END GCTIgnoreClangWarningEnd
//-Wshorten-64-to-32 implicit conversion loses integer precision: %0 to %1
// NOTE(review): "WSHORTEH" is a long-standing typo; kept for caller compatibility.
#define GCT_IGNORE_WSHORTEH_64_TO_32_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wshorten-64-to-32)
#define GCT_IGNORE_WSHORTEH_64_TO_32_WARNING_END GCTIgnoreClangWarningEnd
//-Wsign-compare comparison of integers of different signs: %0 and %1
#define GCT_IGNORE_WSIGN_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsign-compare)
#define GCT_IGNORE_WSIGN_COMPARE_WARNING_END GCTIgnoreClangWarningEnd
//-Wsign-conversion implicit conversion changes signedness: %0 to %1
//-Wsign-conversion operand of ? changes signedness: %0 to %1
#define GCT_IGNORE_WSIGN_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsign-conversion)
#define GCT_IGNORE_WSIGN_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd
//-Wsizeof-array-argument sizeof on array function parameter will return size of %0 instead of %1
#define GCT_IGNORE_WSIZEOF_ARRAY_ARGUMENT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsizeof-array-argument)
#define GCT_IGNORE_WSIZEOF_ARRAY_ARGUMENT_WARNING_END GCTIgnoreClangWarningEnd
//-Wsizeof-array-decay sizeof on pointer operation will return size of %0 instead of %1
// NOTE(review): "WSIZEFO" is a typo for WSIZEOF; kept for caller compatibility.
#define GCT_IGNORE_WSIZEFO_ARRAY_DECAY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsizeof-array-decay)
#define GCT_IGNORE_WSIZEFO_ARRAY_DECAY_WARNING_END GCTIgnoreClangWarningEnd
//-Wsizeof-pointer-memaccess '%0' call operates on objects of type %1 while the size is based on a " "different type %2
//-Wsizeof-pointer-memaccess argument to 'sizeof' in %0 call is the same pointer type %1 as the %select{destination|source}2 expected %3 or an explicit length
#define GCT_IGNORE_WSIZEFO_POINTER_MEMACCESS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsizeof-pointer-memaccess)
#define GCT_IGNORE_WSIZEFO_POINTER_MEMACCESS_WARNING_END GCTIgnoreClangWarningEnd
//-Wsometimes-uninitialized variable %0 is %select{used|captured}1 uninitialized whenever %select{'%3' condition is %select{true|false}4|'%3' loop %select{is entered|exits because its condition is false}4|'%3' loop %select{condition is true|exits because its condition is false}4|switch %3 is taken|its declaration is reached|%3 is called}2
#define GCT_IGNORE_WSOMETIMES_UNINITIALIZED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsometimes-uninitialized)
#define GCT_IGNORE_WSOMETIMES_UNINITIALIZED_WARNING_END GCTIgnoreClangWarningEnd
//-Wstatic-local-in-inline non-constant static local variable in inline function may be different in different files
#define GCT_IGNORE_WSTATIC_LOCAL_IN_INLINE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstatic-local-in-inline)
#define GCT_IGNORE_WSTATIC_LOCAL_IN_INLINE_WARNING_END GCTIgnoreClangWarningEnd
//-Wstatic-self-init static variable %0 is suspiciously used within its own initialization
#define GCT_IGNORE_WSTATIC_SELF_INIT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstatic-self-init)
#define GCT_IGNORE_WSTATIC_SELF_INIT_WARNING_END GCTIgnoreClangWarningEnd
//-Wstrict-selector-match multiple methods named %0 found
#define GCT_IGNORE_WSTRICT_SELECTOR_MATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstrict-selector-match)
#define GCT_IGNORE_WSTRICT_SELECTOR_MATCH_WARNING_END GCTIgnoreClangWarningEnd
//-Wstring-compare result of comparison against %select{a string literal|@encode}0 is unspecified (use strncmp instead)
#define GCT_IGNORE_WSTRING_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstring-compare)
#define GCT_IGNORE_WSTRING_COMPARE_WARNING_END GCTIgnoreClangWarningEnd
//-Wstring-conversion implicit conversion turns string literal into bool: %0 to %1
#define GCT_IGNORE_WSTRING_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstring-conversion)
#define GCT_IGNORE_WSTRING_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd
//-Wstring-plus-char adding %0 to a string pointer does not append to the string
#define GCT_IGNORE_WSTRING_PLUS_CHAR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstring-plus-char)
#define GCT_IGNORE_WSTRING_PLUS_CHAR_WARNING_END GCTIgnoreClangWarningEnd
//-Wstring-plus-int adding %0 to a string does not append to the string
#define GCT_IGNORE_WSTRING_PLUS_INT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstring-plus-int)
#define GCT_IGNORE_WSTRING_PLUS_INT_WARNING_END GCTIgnoreClangWarningEnd
//-Wstrlcpy-strlcat-size size argument in %0 call appears to be size of the source expected the size of the destination
#define GCT_IGNORE_WSTRLCPY_STRLCAT_SIZE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstrlcpy-strlcat-size)
#define GCT_IGNORE_WSTRLCPY_STRLCAT_SIZE_WARNING_END GCTIgnoreClangWarningEnd
//-Wstrncat-size the value of the size argument in 'strncat' is too large, might lead to a " "buffer overflow
//-Wstrncat-size size argument in 'strncat' call appears " "to be size of the source
//-Wstrncat-size the value of the size argument to 'strncat' is wrong
#define GCT_IGNORE_WSTRNCAT_SIZE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstrncat-size)
#define GCT_IGNORE_WSTRNCAT_SIZE_WARNING_END GCTIgnoreClangWarningEnd
//-Wsuper-class-method-mismatch method parameter type %diff{$ does not match super class method parameter type $|does not match super class method parameter type}0,1
#define GCT_IGNORE_WSUPER_CLASS_METHOD_MISMATCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsuper-class-method-mismatch)
#define GCT_IGNORE_WSUPER_CLASS_METHOD_MISMATCH_WARNING_END GCTIgnoreClangWarningEnd
//-Wswitch overflow converting case value to switch condition type (%0 to %1)
//-Wswitch case value not in enumerated type %0
//-Wswitch %0 enumeration values not handled in switch: %1, %2, %3...
//-Wswitch enumeration values %0 and %1 not handled in switch
//-Wswitch enumeration value %0 not handled in switch
//-Wswitch enumeration values %0, %1, and %2 not handled in switch
#define GCT_IGNORE_WSWITCH_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wswitch)
#define GCT_IGNORE_WSWITCH_WARNING_END GCTIgnoreClangWarningEnd
//-Wswitch-enum enumeration values %0, %1, and %2 not explicitly handled in switch
//-Wswitch-enum enumeration values %0 and %1 not explicitly handled in switch
//-Wswitch-enum %0 enumeration values not explicitly handled in switch: %1, %2, %3...
// Warning-suppression pairs for switch/tautology/thread-safety diagnostics.
//-Wswitch-enum enumeration value %0 not explicitly handled in switch
#define GCT_IGNORE_WSWITCH_ENUM_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wswitch-enum)
#define GCT_IGNORE_WSWITCH_ENUM_WARNING_END GCTIgnoreClangWarningEnd
//-Wtautological-compare comparison of %0 unsigned%select{| enum}2 expression is always %1
//-Wtautological-compare %select{self-|array }0comparison always evaluates to %select{false|true|a constant}1
//-Wtautological-compare comparison of unsigned%select{| enum}2 expression %0 is always %1
#define GCT_IGNORE_WTAUTOLOGICAL_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wtautological-compare)
#define GCT_IGNORE_WTAUTOLOGICAL_COMPARE_WARNING_END GCTIgnoreClangWarningEnd
//-Wtautological-constant-out-of-range-compare comparison of constant %0 with expression of type %1 is always %select{false|true}2
#define GCT_IGNORE_WTAUTOLOGICAL_CONSTANT_OUT_OF_RANGE_COMPARE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wtautological-constant-out-of-range-compare)
#define GCT_IGNORE_WTAUTOLOGICAL_CONSTANT_OUT_OF_RANGE_COMPARE_WARNING_END GCTIgnoreClangWarningEnd
//-Wthread-safety-analysis locking '%0' that is already locked
//-Wthread-safety-analysis cannot call function '%0' while mutex '%1' is locked
//-Wthread-safety-analysis %select{reading|writing}2 the value pointed to by '%0' requires locking %select{'%1'|'%1' exclusively}2
//-Wthread-safety-analysis unlocking '%0' that was not locked
//-Wthread-safety-analysis mutex '%0' is locked exclusively and shared in the same scope
//-Wthread-safety-analysis calling function '%0' requires %select{shared|exclusive}2 lock on '%1'
//-Wthread-safety-analysis %select{reading|writing}2 variable '%0' requires locking %select{'%1'|'%1' exclusively}2
//-Wthread-safety-analysis cannot resolve lock expression
//-Wthread-safety-analysis expecting mutex '%0' to be locked at the end of function
//-Wthread-safety-analysis mutex '%0' is not locked on every path through here
//-Wthread-safety-analysis %select{reading|writing}1 the value pointed to by '%0' requires locking %select{any mutex|any mutex exclusively}1
//-Wthread-safety-analysis %select{reading|writing}1 variable '%0' requires locking %select{any mutex|any mutex exclusively}1
//-Wthread-safety-analysis mutex '%0' is still locked at the end of function
//-Wthread-safety-analysis expecting mutex '%0' to be locked at start of each loop
#define GCT_IGNORE_WTHREAD_SAFETY_ANALYSIS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wthread-safety-analysis)
#define GCT_IGNORE_WTHREAD_SAFETY_ANALYSIS_WARNING_END GCTIgnoreClangWarningEnd
//-Wthread-safety-attributes ignoring %0 attribute because its argument is invalid
//-Wthread-safety-attributes %0 attribute only applies to %select{fields and global variables|functions and methods|classes and structs}1
//-Wthread-safety-attributes %0 attribute requires arguments that are class type or point to class type type here is '%1'
//-Wthread-safety-attributes %0 attribute can only be applied in a context annotated with 'lockable' attribute
//-Wthread-safety-attributes %0 attribute requires arguments whose type is annotated with 'lockable' attribute type here is '%1'
//-Wthread-safety-attributes '%0' only applies to pointer types type here is %1
#define GCT_IGNORE_WTHREAD_SAFETY_ATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wthread-safety-attributes)
#define GCT_IGNORE_WTHREAD_SAFETY_ATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd
//-Wthread-safety-beta Thread safety beta warning.
#define GCT_IGNORE_WTHREAD_SAFETY_BETA_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wthread-safety-beta) #define GCT_IGNORE_WTHREAD_SAFETY_BETA_WARNING_END GCTIgnoreClangWarningEnd //-Wthread-safety-precise %select{reading|writing}2 the value pointed to by '%0' requires locking %select{'%1'|'%1' exclusively}2 //-Wthread-safety-precise %select{reading|writing}2 variable '%0' requires locking %select{'%1'|'%1' exclusively}2 //-Wthread-safety-precise calling function '%0' requires %select{shared|exclusive}2 lock on '%1' #define GCT_IGNORE_WTHREAD_SAFETY_PRECISE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wthread-safety-precise) #define GCT_IGNORE_WTHREAD_SAFETY_PRECISE_WARNING_END GCTIgnoreClangWarningEnd //-Wtype-safety this type tag was not designed to be used with this function //-Wtype-safety specified %0 type tag requires a null pointer //-Wtype-safety argument type %0 doesn't match specified '%1' type tag %select{that requires %3|}2 #define GCT_IGNORE_WTYPE_SAFETY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wtype-safety) #define GCT_IGNORE_WTYPE_SAFETY_WARNING_END GCTIgnoreClangWarningEnd //-Wundeclared-selector undeclared selector %0 did you mean %1? 
// Suppression pairs for undeclared/undefined/uninitialized/unneeded diagnostics.
//-Wundeclared-selector undeclared selector %0
#define GCT_IGNORE_WUNDECLARED_SELECTOR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wundeclared-selector)
#define GCT_IGNORE_WUNDECLARED_SELECTOR_WARNING_END GCTIgnoreClangWarningEnd
//-Wundefined-inline inline function %q0 is not defined
#define GCT_IGNORE_WUNDEFINED_INLINE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wundefined-inline)
#define GCT_IGNORE_WUNDEFINED_INLINE_WARNING_END GCTIgnoreClangWarningEnd
//-Wundefined-internal %select{function|variable}0 %q1 has internal linkage but is not defined
#define GCT_IGNORE_WUNDEFINED_INTERNAL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wundefined-internal)
#define GCT_IGNORE_WUNDEFINED_INTERNAL_WARNING_END GCTIgnoreClangWarningEnd
//-Wundefined-reinterpret-cast dereference of type %1 that was reinterpret_cast from type %0 has undefined behavior
//-Wundefined-reinterpret-cast reinterpret_cast from %0 to %1 has undefined behavior
#define GCT_IGNORE_WUNDEFINED_REINTERPRET_CAST_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wundefined-reinterpret-cast)
#define GCT_IGNORE_WUNDEFINED_REINTERPRET_CAST_WARNING_END GCTIgnoreClangWarningEnd
//-Wuninitialized reference %0 is not yet bound to a value when used within its own initialization
//-Wuninitialized field %0 is uninitialized when used here
//-Wuninitialized block pointer variable %0 is uninitialized when captured by block
//-Wuninitialized variable %0 is uninitialized when used within its own initialization
//-Wuninitialized variable %0 is uninitialized when %select{used here|captured by block}1
//-Wuninitialized reference %0 is not yet bound to a value when used here
#define GCT_IGNORE_WUNINITIALIZED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wuninitialized)
#define GCT_IGNORE_WUNINITIALIZED_WARNING_END GCTIgnoreClangWarningEnd
//-Wunneeded-internal-declaration %select{function|variable}0 %1 is not needed and will not be emitted
//-Wunneeded-internal-declaration 'static' function %0 declared in header file should be declared 'static inline'
#define GCT_IGNORE_WUNNEEDED_INTERNAL_DECLARATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunneeded-internal-declaration)
#define GCT_IGNORE_WUNNEEDED_INTERNAL_DECLARATION_WARNING_END GCTIgnoreClangWarningEnd
//-Wunneeded-member-function member function %0 is not needed and will not be emitted
#define GCT_IGNORE_WUNNEEDED_MEMBER_FUNCTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunneeded-member-function)
#define GCT_IGNORE_WUNNEEDED_MEMBER_FUNCTION_WARNING_END GCTIgnoreClangWarningEnd
//-Wunreachable-code will never be executed
#define GCT_IGNORE_WUNREACHABLE_CODE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunreachable-code)
#define GCT_IGNORE_WUNREACHABLE_CODE_WARNING_END GCTIgnoreClangWarningEnd
//-Wunsequenced multiple unsequenced modifications to %0
//-Wunsequenced unsequenced modification and access to %0
#define GCT_IGNORE_WUNSEQUENCED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunsequenced)
#define GCT_IGNORE_WUNSEQUENCED_WARNING_END GCTIgnoreClangWarningEnd
//-Wunsupported-friend dependent nested name specifier '%0' for friend template declaration is not supported ignoring this friend declaration
//-Wunsupported-friend dependent nested name specifier '%0' for friend class declaration is not supported turning off access control for %1
#define GCT_IGNORE_WUNSUPPORTED_FRIEND_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunsupported-friend)
#define GCT_IGNORE_WUNSUPPORTED_FRIEND_WARNING_END GCTIgnoreClangWarningEnd
//-Wunsupported-visibility target does not support 'protected' visibility using 'default'
#define GCT_IGNORE_WUNSUPPORTED_VISIBILITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunsupported-visibility)
#define GCT_IGNORE_WUNSUPPORTED_VISIBILITY_WARNING_END GCTIgnoreClangWarningEnd
//-Wunused-comparison %select{equality|inequality}0 comparison result unused
#define GCT_IGNORE_WUNUSED_COMPARISON_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-comparison)
#define GCT_IGNORE_WUNUSED_COMPARISON_WARNING_END GCTIgnoreClangWarningEnd
//-Wunused-const-variable unused variable %0 #define GCT_IGNORE_WUNUSED_CONST_VARIABLE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-const-variable) #define GCT_IGNORE_WUNUSED_CONST_VARIABLE_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-exception-parameter unused exception parameter %0 #define GCT_IGNORE_WUNUSED_EXCEPTION_PARAMETER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-exception-parameter) #define GCT_IGNORE_WUNUSED_EXCEPTION_PARAMETER_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-function unused function %0 #define GCT_IGNORE_WUNUSED_FUNCTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-function) #define GCT_IGNORE_WUNUSED_FUNCTION_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-label unused label %0 #define GCT_IGNORE_WUNUSED_LABEL_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-label) #define GCT_IGNORE_WUNUSED_LABEL_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-member-function unused member function %0 #define GCT_IGNORE_WUNUSED_MEMBER_FUNCTION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-member-function) #define GCT_IGNORE_WUNUSED_MEMBER_FUNCTION_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-parameter unused parameter %0 #define GCT_IGNORE_WUNUSED_PARAMETER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-parameter) #define GCT_IGNORE_WUNUSED_PARAMETER_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-private-field private field %0 is not used #define GCT_IGNORE_WUNUSED_PRIVATE_FIELD_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-private-field) #define GCT_IGNORE_WUNUSED_PRIVATE_FIELD_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-property-ivar ivar %0 which backs the property is not referenced in this property's accessor #define GCT_IGNORE_WUNUSED_PROPERTY_IVAR_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-property-ivar) #define GCT_IGNORE_WUNUSED_PROPERTY_IVAR_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-result ignoring return value of function declared with warn_unused_result attribute #define 
GCT_IGNORE_WUNUSED_RESULT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-result) #define GCT_IGNORE_WUNUSED_RESULT_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-value ignoring return value of function declared with %0 attribute //-Wunused-value expression result unused should this cast be to 'void'? //-Wunused-value expression result unused #define GCT_IGNORE_WUNUSED_VALUE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-value) #define GCT_IGNORE_WUNUSED_VALUE_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-variable unused variable %0 #define GCT_IGNORE_WUNUSED_VARIABLE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-variable) #define GCT_IGNORE_WUNUSED_VARIABLE_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-volatile-lvalue expression result unused assign into a variable to force a volatile load #define GCT_IGNORE_WUNUSED_VOLATILE_LVALUE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-volatile-lvalue) #define GCT_IGNORE_WUNUSED_VOLATILE_LVALUE_WARNING_END GCTIgnoreClangWarningEnd //-Wused-but-marked-unused %0 was marked unused but was used #define GCT_IGNORE_WUSED_BUT_MARKED_UNUSED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wused-but-marked-unused) #define GCT_IGNORE_WUSED_BUT_MARKED_UNUSED_WARNING_END GCTIgnoreClangWarningEnd //-Wuser-defined-literals user-defined literal suffixes not starting with '_' are reserved%select{ no literal will invoke this operator|}0 #define GCT_IGNORE_WUSED_DEFINE_LITERALS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wuser-defined-literals) #define GCT_IGNORE_WUSED_DEFINE_LITERALS_WARNING_END GCTIgnoreClangWarningEnd //-Wvarargs second parameter of 'va_start' not last named argument //-Wvarargs 'va_start' has undefined behavior with reference types //-Wvarargs second argument to 'va_arg' is of promotable type %0 this va_arg has undefined behavior because arguments will be promoted to %1 #define GCT_IGNORE_WVARARGS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvarargs) #define GCT_IGNORE_WVARARGS_WARNING_END 
GCTIgnoreClangWarningEnd //-Wvector-conversion incompatible vector types %select{%diff{assigning to $ from $|assigning to different types}0,1|%diff{passing $ to parameter of type $|passing to parameter of different type}0,1|%diff{returning $ from a function with result type $|returning from function with different return type}0,1|%diff{converting $ to type $|converting between types}0,1|%diff{initializing $ with an expression of type $|initializing with expression of different type}0,1|%diff{sending $ to parameter of type $|sending to parameter of different type}0,1|%diff{casting $ to type $|casting between types}0,1}2 #define GCT_IGNORE_WVECTOR_CONVERSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvector-conversion) #define GCT_IGNORE_WVECTOR_CONVERSION_WARNING_END GCTIgnoreClangWarningEnd //-Wvexing-parse parentheses were disambiguated as a function declaration //-Wvexing-parse empty parentheses interpreted as a function declaration #define GCT_IGNORE_WVEXING_PARSE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvexing-parse) #define GCT_IGNORE_WVEXING_PARSE_WARNING_END GCTIgnoreClangWarningEnd //-Wvisibility declaration of %0 will not be visible outside of this function //-Wvisibility redefinition of %0 will not be visible outside of this function #define GCT_IGNORE_WVISIBILITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvisibility) #define GCT_IGNORE_WVISIBILITY_WARNING_END GCTIgnoreClangWarningEnd //-Wvla variable length array used #define GCT_IGNORE_WVLA_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvla) #define GCT_IGNORE_WVLA_WARNING_END GCTIgnoreClangWarningEnd //-Wvla-extension variable length arrays are a C99 feature #define GCT_IGNORE_WVLA_WXTENSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wvla-extension) #define GCT_IGNORE_WVLA_WXTENSION_WARNING_END GCTIgnoreClangWarningEnd //-Wweak-template-vtables explicit template instantiation %0 will emit a vtable in every translation unit #define GCT_IGNORE_WWEAK_TEMPLATE_VTABLES_WARNING_BEGIN 
GCTIgnoreClangWarningBegin(-Wweak-template-vtables) #define GCT_IGNORE_WWEAK_TEMPLATE_VTABLES_WARNING_END GCTIgnoreClangWarningEnd //-Wweak-vtables %0 has no out-of-line virtual method definitions; its vtable will be emitted in every translation unit #define GCT_IGNORE_WWEAK_VTABLES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wweak-vtables) #define GCT_IGNORE_WWEAK_VTABLES_WARNING_END GCTIgnoreClangWarningEnd #pragma mark - Lexer Warnings //-W#pragma-messages %0 #define GCT_IGNORE_WPRAGMA_MESSAGES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-W#pragma-messages) #define GCT_IGNORE_WPRAGMA_MESSAGES_WARNING_END GCTIgnoreClangWarningEnd //-W#warnings %0 //-W#warnings %0 #define GCT_IGNORE_WWARNINGS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-W#warnings) #define GCT_IGNORE_WWARNINGS_WARNING_END GCTIgnoreClangWarningEnd //-Wambiguous-macro ambiguous expansion of macro %0 #define GCT_IGNORE_WAMBIGUOUS_MACRO_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wambiguous-macro) #define GCT_IGNORE_WAMBIGUOUS_MACRO_WARNING_END GCTIgnoreClangWarningEnd //-Wauto-import treating #%select{include|import|include_next|__include_macros}0 as an import of module '%1' #define GCT_IGNORE_WAUTO_IMPORT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wauto-import) #define GCT_IGNORE_WAUTO_IMPORT_WARNING_END GCTIgnoreClangWarningEnd //-Wbackslash-newline-escape backslash and newline separated by space #define GCT_IGNORE_WBACKSLASH_NEWLINE_ESCAPE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wbackslash-newline-escape) #define GCT_IGNORE_WBACKSLASH_NEWLINE_ESCAPE_WARNING_END GCTIgnoreClangWarningEnd //-Wc++11-compat identifier after literal will be treated as a user-defined literal suffix in C++11 //-Wc++11-compat '%0' is a keyword in C++11 #define GCT_IGNORE_WC11_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++11-compat) #define GCT_IGNORE_WC11_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-c++11-compat digit separators are incompatible with C++ standards before C++1y #define 
GCT_IGNORE_WC98_C11_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-c++11-compat)
#define GCT_IGNORE_WC98_C11_COMPAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wc++98-c++11-compat-pedantic binary integer literals are incompatible with C++ standards before C++1y
#define GCT_IGNORE_WC98_C11_COMPAT_PEDANTIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-c++11-compat-pedantic)
#define GCT_IGNORE_WC98_C11_COMPAT_PEDANTIC_WARNING_END GCTIgnoreClangWarningEnd

//-Wc++98-compat raw string literals are incompatible with C++98
//-Wc++98-compat unicode literals are incompatible with C++98
//-Wc++98-compat universal character name referring to a control character is incompatible with C++98
//-Wc++98-compat '<::' is treated as digraph '<:' (aka '[') followed by ':' in C++98
//-Wc++98-compat using this character in an identifier is incompatible with C++98
//-Wc++98-compat specifying character '%0' with a universal character name is incompatible with C++98
#define GCT_IGNORE_WC98_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat)
#define GCT_IGNORE_WC98_COMPAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wc++98-compat-pedantic variadic macros are incompatible with C++98
//-Wc++98-compat-pedantic #line number greater than 32767 is incompatible with C++98
//-Wc++98-compat-pedantic C++98 requires newline at end of file
//-Wc++98-compat-pedantic empty macro arguments are incompatible with C++98
#define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat-pedantic)
#define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_END GCTIgnoreClangWarningEnd

//-Wc99-compat unicode literals are incompatible with C99
//-Wc99-compat %select{using this character in an identifier|starting an identifier with this character}0 is incompatible with C99
// NOTE(review): fixed typo — this flag previously read "-Wc99-compa", which
// names no clang warning group and would itself provoke -Wunknown-warning-option.
#define GCT_IGNORE_WC99_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc99-compat)
#define GCT_IGNORE_WC99_COMPAT_WARNING_END GCTIgnoreClangWarningEnd

//-Wcomment '/*' within block comment
//-Wcomment escaped
newline between */ characters at block comment end #define GCT_IGNORE_WCOMMENT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wcomment) #define GCT_IGNORE_WCOMMENT_WARNING_END GCTIgnoreClangWarningEnd //-Wdisabled-macro-expansion disabled expansion of recursive macro #define GCT_IGNORE_WDISABLED_MACRO_EXPANSION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdisabled-macro-expansion) #define GCT_IGNORE_WDISABLED_MACRO_EXPANSION_WARNING_END GCTIgnoreClangWarningEnd //-Wheader-guard %0 is used as a header guard here, followed by #define of a different macro #define GCT_IGNORE_WHEADER_GUARD_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wheader-guard) #define GCT_IGNORE_WHEADER_GUARD_WARNING_END GCTIgnoreClangWarningEnd //-Wignored-attributes unknown attribute '%0' #define GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wignored-attributes) #define GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd //-Wincomplete-module header '%0' is included in module '%1' but not listed in module map #define GCT_IGNORE_WINCOMPLETE_MODULE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wincomplete-module) #define GCT_IGNORE_WINCOMPLETE_MODULE_WARNING_END GCTIgnoreClangWarningEnd //-Wincomplete-umbrella umbrella header for module '%0' does not include header '%1' #define GCT_IGNORE_WINCOMPLETE_UMBRELLA_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wincomplete-umbrella) #define GCT_IGNORE_WINCOMPLETE_UMBRELLA_WARNING_END GCTIgnoreClangWarningEnd //-Winvalid-token-paste pasting formed '%0', an invalid preprocessing token, DefaultError #define GCT_IGNORE_WINVALID_TOKEN_PASTE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Winvalid-token-paste) #define GCT_IGNORE_WINVALID_TOKEN_PASTE_WARNING_END GCTIgnoreClangWarningEnd //-Wmalformed-warning-check __has_warning expected option name (e.g. 
\"-Wundef\") #define GCT_IGNORE_WMALFOEMED_WARNING_CHECK_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmalformed-warning-check) #define GCT_IGNORE_WMALFOEMED_WARNING_CHECK_WARNING_END GCTIgnoreClangWarningEnd //-Wnewline-eof no newline at end of file #define GCT_IGNORE_WNEWLINE_EOF_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnewline-eof) #define GCT_IGNORE_WNEWLINE_EOF_WARNING_END GCTIgnoreClangWarningEnd //-Wnull-character null character ignored //-Wnull-character null character(s) preserved in string literal //-Wnull-character null character(s) preserved in character literal #define GCT_IGNORE_WNULL_CHARACTER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wnull-character) #define GCT_IGNORE_WNULL_CHARACTER_WARNING_END GCTIgnoreClangWarningEnd //-Wtrigraphs ignored trigraph would end block comment //-Wtrigraphs trigraph ignored #define GCT_IGNORE_WTEIGRAPHS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wtrigraphs) #define GCT_IGNORE_WTEIGRAPHS_WARNING_END GCTIgnoreClangWarningEnd //-Wundef %0 is not defined, evaluates to 0 #define GCT_IGNORE_WUNDEF_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wundef) #define GCT_IGNORE_WUNDEF_WARNING_END GCTIgnoreClangWarningEnd //-Wunicode universal character names are only valid in C99 or C++ treating as '\\' followed by identifier //-Wunicode \\%0 used with no following hex digits treating as '\\' followed by identifier //-Wunicode incomplete universal character name treating as '\\' followed by identifier //-Wunicode universal character name refers to a surrogate character #define GCT_IGNORE_WUNICODE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunicode) #define GCT_IGNORE_WUNICODE_WARNING_END GCTIgnoreClangWarningEnd //-Wunknown-pragmas unknown pragma ignored //-Wunknown-pragmas pragma STDC FENV_ACCESS ON is not supported, ignoring pragma #define GCT_IGNORE_WUNKNOWN_PRAGMAS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunknown-pragmas) #define GCT_IGNORE_WUNKNOWN_PRAGMAS_WARNING_END GCTIgnoreClangWarningEnd //-Wunused-macros macro is not used 
#define GCT_IGNORE_WUNUSED_MACROS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wunused-macros) #define GCT_IGNORE_WUNUSED_MACROS_WARNING_END GCTIgnoreClangWarningEnd #pragma mark - Parser Warnings //-Warc-bridge-casts-disallowed-in-nonarc '%0' casts have no effect when not using ARC #define GCT_IGNORE_WARC_BRIDGE_CASTS_DISALLOWED_IN_NONARC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Warc-bridge-casts-disallowed-in-nonarc) #define GCT_IGNORE_WARC_BRIDGE_CASTS_DISALLOWED_IN_NONARC_WARNING_END GCTIgnoreClangWarningEnd //-Wattributes unknown __declspec attribute %0 ignored #define GCT_IGNORE_WATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wattributes) #define GCT_IGNORE_WATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd //-Wavailability 'unavailable' availability overrides all other availability information #define GCT_IGNORE_WAVAILABILITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wavailability) #define GCT_IGNORE_WAVAILABILITY_WARNING_END GCTIgnoreClangWarningEnd //-Wc++11-compat use of right-shift operator ('') in template argument will require parentheses in C++11 //-Wc++11-compat 'auto' storage class specifier is redundant and incompatible with C++11 #define GCT_IGNORE_WC11_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++11-compat) #define GCT_IGNORE_WC11_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-c++11-compat 'decltype(auto)' type specifier is incompatible with C++ standards before C++1y #define GCT_IGNORE_WC98_C11_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-c++11-compat) #define GCT_IGNORE_WC98_C11_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-compat range-based for loop is incompatible with C++98 //-Wc++98-compat alias declarations are incompatible with C++98 //-Wc++98-compat in-class initialization of non-static data members is incompatible with C++98 //-Wc++98-compat defaulted function definitions are incompatible with C++98 //-Wc++98-compat rvalue references are incompatible with C++98 //-Wc++98-compat reference qualifiers 
on functions are incompatible with C++98 //-Wc++98-compat inline namespaces are incompatible with C++98 //-Wc++98-compat generalized initializer lists are incompatible with C++98 //-Wc++98-compat trailing return types are incompatible with C++98 //-Wc++98-compat enumeration types with a fixed underlying type are incompatible with C++98 //-Wc++98-compat alignof expressions are incompatible with C++98 //-Wc++98-compat '%0' keyword is incompatible with C++98 //-Wc++98-compat 'decltype' type specifier is incompatible with C++98 //-Wc++98-compat deleted function definitions are incompatible with C++98 //-Wc++98-compat consecutive right angle brackets are incompatible with C++98 (use '> >') //-Wc++98-compat static_assert declarations are incompatible with C++98 //-Wc++98-compat scoped enumerations are incompatible with C++98 //-Wc++98-compat lambda expressions are incompatible with C++98 //-Wc++98-compat attributes are incompatible with C++98 //-Wc++98-compat 'alignas' is incompatible with C++98 //-Wc++98-compat noexcept specifications are incompatible with C++98 //-Wc++98-compat literal operators are incompatible with C++98 //-Wc++98-compat noexcept expressions are incompatible with C++98 //-Wc++98-compat 'nullptr' is incompatible with C++98 #define GCT_IGNORE_WC98_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat) #define GCT_IGNORE_WC98_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wc++98-compat-pedantic extra '' outside of a function is incompatible with C++98 //-Wc++98-compat-pedantic extern templates are incompatible with C++98 //-Wc++98-compat-pedantic commas at the end of enumerator lists are incompatible with C++98 #define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wc++98-compat-pedantic) #define GCT_IGNORE_WC98_COMPAT_PEDANTIC_WARNING_END GCTIgnoreClangWarningEnd //-Wdangling-else add explicit braces to avoid dangling else #define GCT_IGNORE_WDANGLING_ELSE_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdangling-else) 
#define GCT_IGNORE_WDANGLING_ELSE_WARNING_END GCTIgnoreClangWarningEnd //-Wdeprecated Use of 'long' with '__vector' is deprecated #define GCT_IGNORE_WDEPRECATED_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated) #define GCT_IGNORE_WDEPRECATED_WARNING_END GCTIgnoreClangWarningEnd //-Wdeprecated-declarations use of C-style parameters in Objective-C method declarations is deprecated #define GCT_IGNORE_WDEPRECATED_DECLARATIONS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-declarations) #define GCT_IGNORE_WDEPRECATED_DECLARATIONS_WARNING_END GCTIgnoreClangWarningEnd //-Wdeprecated-register 'register' storage class specifier is deprecated #define GCT_IGNORE_WDEPRECATED_REGISTER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-register) #define GCT_IGNORE_WDEPRECATED_REGISTER_WARNING_END GCTIgnoreClangWarningEnd //-Wduplicate-decl-specifier duplicate '%0' declaration specifier #define GCT_IGNORE_WDUPLICATE_DECL_SPECIFIER_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wduplicate-decl-specifier) #define GCT_IGNORE_WDUPLICATE_DECL_SPECIFIER_WARNING_END GCTIgnoreClangWarningEnd //-Wextra-semi extra ';' after member function definition #define GCT_IGNORE_WEXTRA_SEMI_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wextra-semi) #define GCT_IGNORE_WEXTRA_SEMI_WARNING_END GCTIgnoreClangWarningEnd //-Wextra-tokens "extra tokens at the end of '#pragma omp %0' are ignored #define GCT_IGNORE_WEXTRA_TOKENS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wextra-tokens) #define GCT_IGNORE_WEXTRA_TOKENS_WARNING_END GCTIgnoreClangWarningEnd //-Wgcc-compat GCC does not allow %0 attribute in this position on a function definition #define GCT_IGNORE_WGCC_COMPAT_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wgcc-compat) #define GCT_IGNORE_WGCC_COMPAT_WARNING_END GCTIgnoreClangWarningEnd //-Wignored-attributes attribute %0 ignored, because it is not attached to a declaration #define GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wignored-attributes) #define 
GCT_IGNORE_WIGNORED_ATTRIBUTES_WARNING_END GCTIgnoreClangWarningEnd //-Wmicrosoft-exists dependent %select{__if_not_exists|__if_exists}0 declarations are ignored #define GCT_IGNORE_WMICROSOFT_EXISTS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmicrosoft-exists) #define GCT_IGNORE_WMICROSOFT_EXISTS_WARNING_END GCTIgnoreClangWarningEnd //-Wmissing-selector-name %0 used as the name of the previous parameter rather than as part of the selector #define GCT_IGNORE_WMISSING_SELECTOR_NAME_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wmissing-selector-name) #define GCT_IGNORE_WMISSING_SELECTOR_NAME_WARNING_END GCTIgnoreClangWarningEnd //-Wsemicolon-before-method-body semicolon before method body is ignored #define GCT_IGNORE_WSEMICOLON_BEFORE_METHOD_BODY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsemicolon-before-method-body) #define GCT_IGNORE_WSEMICOLON_BEFORE_METHOD_BODY_WARNING_END GCTIgnoreClangWarningEnd //-Wsource-uses-openmp "unexpected '#pragma omp ...' in program #define GCT_IGNORE_WSOURCE_USES_OPENMP_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wsource-uses-openmp) #define GCT_IGNORE_WSOURCE_USES_OPENMP_WARNING_END GCTIgnoreClangWarningEnd //-Wstatic-inline-explicit-instantiation ignoring '%select{static|inline}0' keyword on explicit template instantiation #define GCT_IGNORE_WSTATIC_INLINE_EXPLICIT_INSTANTIATION_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wstatic-inline-explicit-instantiation) #define GCT_IGNORE_WSTATIC_INLINE_EXPLICIT_INSTANTIATION_WARNING_END GCTIgnoreClangWarningEnd #define GCT_IGNORE_WPARTIAL_AVAILABILITY_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wpartial-availability) #define GCT_IGNORE_WPARTIAL_AVAILABILITY_WARNING_END GCTIgnoreClangWarningEnd #define GCT_IGNORE_WDEPRECATED_DECLARATIONS_WARNING_BEGIN GCTIgnoreClangWarningBegin(-Wdeprecated-declarations) #define GCT_IGNORE_WDEPRECATED_DECLARATIONS_WARNING_END GCTIgnoreClangWarningEnd #endif /* GCTFoundationClangConstants_h */
// ==== file: ast-dump-openmp-ordered.c ====
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test_one() {
#pragma omp ordered
  ;
}

void test_two(int x) {
#pragma omp for ordered
  for (int i = 0; i < x; i++)
    ;
}

void test_three(int x) {
#pragma omp for ordered(1)
  for (int i = 0; i < x; i++) {
#pragma omp ordered depend(source)
  }
}

// NOTE(review): the CHECK lines below hard-code source line/column positions
// (e.g. line:3:1, col:17) of the code above.  Do not insert, delete, or
// reflow anything before line 19, or the FileCheck matches will break.

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-ordered.c:3:1, line:6:1> line:3:6 test_one 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: |   `-OMPOrderedDirective {{.*}} <line:4:1, col:20>
// CHECK-NEXT: |     `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: |       `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |         |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: |         `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:4:1) *const restrict'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:8:1, line:12:1> line:8:6 test_two 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:12:1>
// CHECK-NEXT: |   `-OMPForDirective {{.*}} <line:9:1, col:24>
// CHECK-NEXT: |     |-OMPOrderedClause {{.*}} <col:17, col:24>
// CHECK-NEXT: |     | `-<<<NULL>>>
// CHECK-NEXT: |     `-CapturedStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: |       |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |       | |-ForStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: |       | | |-DeclStmt {{.*}} <line:10:8, col:17>
// CHECK-NEXT: |       | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: |       | | |   `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: |       | | |-<<<NULL>>>
// CHECK-NEXT: |       | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: |       | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: |       | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: |       | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: |       | | |   `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |       | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: |       | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: |       | | `-NullStmt {{.*}} <line:11:5>
// CHECK-NEXT: |       | |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:9:1) *const restrict'
// CHECK-NEXT: |       | `-VarDecl {{.*}} <line:10:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: |       |   `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: |       `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:14:1, line:19:1> line:14:6 test_three 'void (int)'
// CHECK-NEXT:   |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT:   `-CompoundStmt {{.*}} <col:24, line:19:1>
// CHECK-NEXT:     `-OMPForDirective {{.*}} <line:15:1, col:27>
// CHECK-NEXT:       |-OMPOrderedClause {{.*}} <col:17, col:26>
// CHECK-NEXT:       | `-ConstantExpr {{.*}} <col:25> 'int'
// CHECK-NEXT:       |   `-IntegerLiteral {{.*}} <col:25> 'int' 1
// CHECK-NEXT:       `-CapturedStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT:         |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT:         | |-ForStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT:         | | |-DeclStmt {{.*}} <line:16:8, col:17>
// CHECK-NEXT:         | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT:         | | |   `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT:         | | |-<<<NULL>>>
// CHECK-NEXT:         | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT:         | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT:         | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT:         | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT:         | | |   `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT:         | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT:         | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT:         | | `-CompoundStmt {{.*}} <col:31, line:18:3>
// CHECK-NEXT:         | |   `-OMPOrderedDirective {{.*}} <line:17:1, col:35> openmp_standalone_directive
// CHECK-NEXT:         | |     |-OMPDependClause {{.*}} <col:21, <invalid sloc>>
// CHECK-NEXT:         | |     `-<<<NULL>>>
// CHECK-NEXT:         | |-ImplicitParamDecl {{.*}} <line:15:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:15:1) *const restrict'
// CHECK-NEXT:         | `-VarDecl {{.*}} <line:16:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT:         |   `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT:         `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// ==== file: GB_unaryop__identity_int32_bool.c ====
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_int32_bool
// op(A') function: GB_tran__identity_int32_bool

// C type: int32_t
// A type: bool
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: z = x)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise apply: Cx [p] = (int32_t) Ax [p] for all 0 <= p < anz, split
// statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE when the
// kernel is compiled out (GB_DISABLE), so the caller falls back to the
// generic worker.
GrB_Info GB_unop__identity_int32_bool
(
    int32_t *Cx,        // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,        // number of entries to apply the operator to
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;         // parallel loop index, shared with the pragma below
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose work is done by the included template; GB_PHASE_2_OF_2
// selects the template's second (numerical) phase.  The GB_* macros defined
// above tell the template which types/operator to instantiate.
GrB_Info GB_tran__identity_int32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ==== file: GB_binop__div_fc64.c ====
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__div_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__div_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__div_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fc64) // A*D function (colscale): GB (_AxD__div_fc64) // D*A function (rowscale): GB (_DxB__div_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__div_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__div_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fc64) // C=scalar+B GB (_bind1st__div_fc64) // C=scalar+B' GB (_bind1st_tran__div_fc64) // C=A+scalar GB (_bind2nd__div_fc64) // C=A'+scalar GB (_bind2nd_tran__div_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 
0 // BinaryOp: cij = GB_FC64_div (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_div (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_FC64 || GxB_NO_DIV_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): every function below is a generated wrapper around an
// #include'd template.  The templates reference the surrounding locals
// (Cx, bwork, alpha_scalar, x, y, ...) by these exact names, so the locals
// must not be renamed by hand — regenerate from Generator/* instead.

// C += A+B, where C, A, and B are all dense.
void GB (_Cdense_ewise3_accum__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE), so the
// caller can fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; harmless
    // generator artifact, fix belongs in Generator/*, not here.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC64_t alpha_scalar ;
    GxB_FC64_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        // alpha/beta are only valid (and only read) for eWiseUnion
        alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__div_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (GBB treats NULL as all-present)
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_div (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (GBB treats NULL as all-present)
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_div (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC64_div (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__div_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use in this translation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC64_div (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__div_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__times_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates every built-in kernel for the binary
// operator z = (x * y) on type uint8_t.  The functions defined later in this
// file are thin wrappers: the real work is done by the *_template.c files,
// which consume the GB_* macros defined below, so the exact macro tokens
// (names, argument lists) must not be changed by hand.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_uint8)
// A*D function (colscale):         GB (_AxD__times_uint8)
// D*A function (rowscale):         GB (_DxB__times_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_uint8)
// C=scalar+B                       GB (_bind1st__times_uint8)
// C=scalar+B'                      GB (_bind1st_tran__times_uint8)
// C=A+scalar                       GB (_bind2nd__times_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__times_uint8)

// C type:     uint8_t
// A type:     uint8_t
// A pattern?  0
// B type:     uint8_t
// B pattern?  0

// BinaryOp:   cij = (aij * bij)
// (uint8_t multiply wraps modulo 256 — unsigned overflow is well-defined in C)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x * y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): every function below is a generated wrapper around an
// #include'd template.  The templates reference the surrounding locals
// (Cx, bwork, alpha_scalar, x, y, ...) by these exact names, so the locals
// must not be renamed by hand — regenerate from Generator/* instead.

// C += A+B, where C, A, and B are all dense.
void GB (_Cdense_ewise3_accum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE), so the
// caller can fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; harmless
    // generator artifact, fix belongs in Generator/*, not here.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        // alpha/beta are only valid (and only read) for eWiseUnion
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (GBB treats NULL as all-present)
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (GBB treats NULL as all-present)
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x * aij) ;                       \
}

GrB_Info GB (_bind1st_tran__times_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use in this translation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij * y) ;                       \
}

GrB_Info GB (_bind2nd_tran__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
libimagequant.c
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int max_colors, 
max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps; unsigned char use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; unsigned char use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; unsigned char use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table *acht; 
double gamma; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
// NOTE(review): this opening brace is the body of a variadic logging function
// whose signature lies before this chunk (presumably liq_verbose_printf, given
// the call sites below) — confirm against the full file.
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        // First pass measures the formatted length so the buffer is right-sized.
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);
        LIQ_ARRAY(char, buf, required_space);
        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);
        context->log_callback(context, buf, context->log_callback_user_info);
    }
}

// Forward a fixed (pre-formatted) message to the user's log callback, if set.
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
    if (attr->log_callback) {
        attr->log_callback(attr, msg, attr->log_callback_user_info);
    }
}

// Notify the log consumer that buffered log output should be flushed now.
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
    if (attr->log_flush_callback) {
        attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
    }
}

// Report progress; returns true when the user's callback asked to abort
// (a callback returning 0/false means "stop").
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
    return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}

// Same abort-on-false progress convention, for the remapping phase.
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
    return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}

#if USE_SSE
// Runtime check whether SSE instructions may be used on this CPU.
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
    return true; // SSE is part of the x86-64 baseline
#elif _MSC_VER
    int info[4];
    __cpuid(info, 1);
    /* bool is implemented as a built-in type of size 1 in MSVC */
    // NOTE(review): tests edx bit 26 (presumably SSE2) while the branch below
    // tests bit 25 (SSE) — confirm the intended feature level is consistent.
    return info[3] & (1<<26) ? true : false;
#else
    int a,b,c,d;
    cpuid(1, a, b, c, d);
    return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif

/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
    if (!user_supplied_pointer) {
        return false;
    }
    if (user_supplied_pointer->magic_header == liq_freed_magic) {
        fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling, this is programmer error that should crash the program.
        // program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but security vulnerability may be worse.
        abort();
    }
    return user_supplied_pointer->magic_header == expected_magic_header;
}

NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
    if (!pointer) {
        return false;
    }
    // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not.
    // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read.
    char test_access = *((volatile char *)pointer);
    return test_access || true;
}

// Log an error message through the verbose-print machinery (no-op on bad handle).
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    liq_verbose_printf(attr, " error: %s", msg);
}

// Map a 0..100 user-facing quality value onto an internal mean-square-error cap.
static double quality_to_mse(long quality)
{
    if (quality == 0) {
        return MAX_DIFF;
    }
    if (quality == 100) {
        return 0;
    }
    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
    return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}

// Inverse of quality_to_mse: find the highest quality whose MSE cap admits `mse`.
static unsigned int mse_to_quality(double mse)
{
    for(int i=100; i > 0; i--) {
        if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
            return i;
        }
    }
    return 0;
}

/** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse)
{
    return mse * 65536.0/6.0;
}

// Set minimum/target quality (0..100, minimum <= target); stored as MSE caps.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;
    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}

// Getter counterparts convert stored MSE caps back to 0..100; -1 on bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->max_mse);
}

LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->target_mse);
}

// Limit the output palette size (2..256 entries).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;
    attr->max_colors = colors;
    return LIQ_OK;
}

LIQ_EXPORT
LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->max_colors;
}

// Force at least `bits` of posterization in the output palette (0..4).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE;
    attr->min_posterization_output = bits;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->min_posterization_output;
}

// Translate the 1..10 speed knob into all internal quality/effort parameters
// (k-means iterations, feedback-loop trials, histogram size, dither/contrast
// map usage and the progress-stage percentage split).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
    unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2;
    attr->kmeans_iterations = iterations;
    attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
    attr->feedback_loop_trials = MAX(56-9*speed, 0);
    attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
    attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
    if (attr->use_dither_map && speed < 3) {
        attr->use_dither_map = 2; // always
    }
    attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
    attr->speed = speed;
    // Progress-reporting budget: stage1=analysis, stage2=quantization, stage3=remap.
    attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
    if (attr->feedback_loop_trials < 2) {
        attr->progress_stage1 += 30;
    }
    attr->progress_stage3 = 50 / (1+speed);
    attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->speed;
}

// Set gamma used when producing the final palette; invalidates cached remapping.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->gamma = gamma;
    return LIQ_OK;
}

// Minimum alpha (0..255) below which pixels may be forced opaque (IE6 workaround).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE;
    attr->min_opaque_val = (double)min/255.0;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return MIN(255.f, 256.f * attr->min_opaque_val);
}

LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->last_index_transparent = !!is_last;
}

// Install per-attr / per-result progress callbacks (NULL-safe handle checks).
LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->progress_callback = callback;
    attr->progress_callback_user_info = user_info;
}

LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return;
    result->progress_callback = callback;
    result->progress_callback_user_info = user_info;
}

// Replace the log callback; flush first so no message is lost mid-switch.
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr))
        return;
    liq_verbose_printf_flush(attr);
    attr->log_callback = callback;
    attr->log_callback_user_info = user_info;
}

LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->log_flush_callback = callback;
    attr->log_flush_callback_user_info = user_info;
}

// Convenience constructor using the library's default aligned allocator.
LIQ_EXPORT liq_attr* liq_attr_create()
{
    return liq_attr_create_with_allocator(NULL, NULL);
}

// Destroy an attr handle; magic header is poisoned so use-after-free is detectable.
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }
    liq_verbose_printf_flush(attr);
    attr->magic_header = liq_freed_magic;
    attr->free(attr);
}

// Shallow copy of all settings, allocated with the original's allocator.
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }
    liq_attr *attr = orig->malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = *orig;
    return attr;
}

// 16-byte-aligned malloc; the shift applied to reach alignment is stored
// (xor-obfuscated) in the byte just before the returned pointer so
// liq_aligned_free can recover the original allocation.
static void *liq_aligned_malloc(size_t size)
{
    unsigned char *ptr = malloc(size + 16);
    if (!ptr) {
        return NULL;
    }
    uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
    ptr += offset;
    assert(0 == (((uintptr_t)ptr) & 15));
    ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
    return ptr;
}

LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
    unsigned char *ptr = inptr;
    size_t offset = ptr[-1] ^ 0x59;
    assert(offset > 0 && offset <= 16);
    free(ptr - offset);
}

// Create an attr with caller-supplied malloc/free (both or neither), with
// defaults matching liq_set_speed(attr, 4).
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL;
    }
#endif
    if (!custom_malloc && !custom_free) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    } else if (!custom_malloc != !custom_free) {
        return NULL; // either specify both or none
    }
    liq_attr *attr = custom_malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
        .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
        .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
        .target_mse = 0,
        .max_mse = MAX_DIFF,
    };
    liq_set_speed(attr, 4);
    return attr;
}

// Pin a color into the final palette (gamma-corrected on insertion); max 256.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    });
    return LIQ_OK;
}

LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color)
{
    if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
    hist->fixed_colors[hist->fixed_colors_count++] = color;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER;
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455);
    const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    });
    return liq_histogram_add_fixed_color_f(hist, px);
}

// Allocate only per-thread scratch rows instead of a full float-pixel cache.
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
    img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads());
    return img->temp_f_row != NULL;
}

// Heuristic: images above a size threshold don't get a full cached copy.
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
    return (size_t)img->width * (size_t)img->height > (low_memory_hint ?
    LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}

// Common constructor behind all public liq_image_create_* entry points.
// Either `rows` (direct pixel access) or `row_callback` (pull model) is given.
// NOTE(review): on temp_row / low-memory allocation failure `img` itself is
// returned NULL without being freed — appears to leak; confirm against upstream
// before changing.
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
    if (gamma < 0 || gamma > 1.0) {
        liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
        return NULL;
    }
    if (!rows && !row_callback) {
        liq_log_error(attr, "missing row data");
        return NULL;
    }
    liq_image *img = attr->malloc(sizeof(liq_image));
    if (!img) return NULL;
    *img = (liq_image){
        .magic_header = liq_image_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .width = width, .height = height,
        .gamma = gamma ? gamma : 0.45455,
        .rows = rows,
        .row_callback = row_callback,
        .row_callback_user_info = row_callback_user_info,
        .min_opaque_val = attr->min_opaque_val,
    };
    if (!rows || attr->min_opaque_val < 1.f) {
        // Per-thread temp rows needed when pixels come via callback or must be modified.
        img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads());
        if (!img->temp_row) return NULL;
    }
    // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
    if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
        verbose_print(attr, " conserving memory");
        if (!liq_image_use_low_memory(img)) return NULL;
    }
    if (img->min_opaque_val < 1.f) {
        verbose_print(attr, " Working around IE6 bug by making image less transparent...");
    }
    return img;
}

// Let the image take ownership of caller-provided rows and/or the pixel buffer,
// so they are freed in liq_image_destroy.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    if (ownership_flags & LIQ_OWN_ROWS) {
        if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
        img->free_rows = true;
    }
    if (ownership_flags & LIQ_OWN_PIXELS) {
        img->free_pixels = true;
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int i=1; i < img->height; i++) {
                img->pixels = MIN(img->pixels, img->rows[i]);
            }
        }
    }
    return LIQ_OK;
}

LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);

// Attach a per-pixel importance map (one byte per pixel). With LIQ_COPY_PIXELS
// the buffer is duplicated; with LIQ_OWN_PIXELS the image takes ownership.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
    const size_t required_size = (size_t)img->width * (size_t)img->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }
    if (ownership == LIQ_COPY_PIXELS) {
        unsigned char *tmp = img->malloc(required_size);
        if (!tmp) {
            return LIQ_OUT_OF_MEMORY;
        }
        memcpy(tmp, importance_map, required_size);
        importance_map = tmp;
    } else if (ownership != LIQ_OWN_PIXELS) {
        return LIQ_UNSUPPORTED;
    }
    liq_image_free_importance_map(img);
    img->importance_map = importance_map;
    return LIQ_OK;
}

// Attach a background image (same dimensions); ownership transfers to `img`.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;
    if (background->background) {
        return LIQ_UNSUPPORTED;
    }
    if (img->width != background->width || img->height != background->height) {
        return LIQ_BUFFER_TOO_SMALL;
    }
    if (img->background) {
        liq_image_destroy(img->background);
    }
    img->background = background;
    liq_image_free_maps(img); // Force them to be re-analyzed with the background
    return LIQ_OK;
}

// Validate dimensions: positive, and small enough that later size arithmetic
// (rows * width * sizeof) cannot overflow.
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }
    if (width <= 0 || height <= 0) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }
    if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}

// Pull-model constructor: pixels fetched row-by-row through `row_callback`.
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}

// Constructor from caller-owned array of row pointers.
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    for(int i=0; i < height; i++) {
        if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}

// Constructor from one contiguous RGBA bitmap; builds an internal rows array.
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }
    rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) return NULL;
    for(int i=0; i < height; i++) {
        rows[i] = pixels + width * i;
    }
    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        attr->free(rows);
        return NULL;
    }
    image->free_rows = true;
    image->free_rows_internal = true;
    return image;
}

NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void
liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info) { assert(callback); assert(temp_row); callback(temp_row, row, width, user_info); } LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img) { if (!CHECK_STRUCT_TYPE(img, liq_image)) { return false; } return img->rows || (img->temp_row && img->row_callback); } LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img) { assert(liq_image_has_rgba_pixels(img)); const bool iebug = img->min_opaque_val < 1.f; return (img->rows && !iebug); } LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row) { if (liq_image_can_use_rgba_rows(img)) { return img->rows[row]; } assert(img->temp_row); rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); if (img->rows) { memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0])); } else { liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info); } if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); return temp_row; } LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[]) { assert(row_f_pixels); #ifndef _MSC_VER assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); #endif const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row); for(unsigned int col=0; col < img->width; col++) { row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]); } } LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img) { assert(omp_get_thread_num() == 0); if (img->f_pixels) { return true; } if (!liq_image_should_use_low_memory(img, false)) { img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height); } if (!img->f_pixels) { return liq_image_use_low_memory(img); } if (!liq_image_has_rgba_pixels(img)) { return false; } float 
gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); for(unsigned int i=0; i < img->height; i++) { convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut); } return true; } LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row) { if (!img->f_pixels) { assert(img->temp_f_row); // init should have done that float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); convert_row_to_f(img, row_for_thread, row, gamma_lut); return row_for_thread; } return img->f_pixels + img->width * row; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->width; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->height; } typedef void free_func(void*); LIQ_NONNULL static free_func *get_default_free_func(liq_image *img) { // When default allocator is used then user-supplied pointers must be freed with free() if (img->free_rows_internal || img->free != liq_aligned_free) { return img->free; } return free; } LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image) { if (input_image->free_pixels && input_image->pixels) { get_default_free_func(input_image)(input_image->pixels); input_image->pixels = NULL; } if (input_image->free_rows && input_image->rows) { get_default_free_func(input_image)(input_image->rows); input_image->rows = NULL; } } LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) { if (input_image->importance_map) { input_image->free(input_image->importance_map); input_image->importance_map = NULL; } } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) { liq_image_free_importance_map(input_image); if (input_image->edges) { input_image->free(input_image->edges); 
input_image->edges = NULL; } if (input_image->dither_map) { input_image->free(input_image->dither_map); input_image->dither_map = NULL; } } LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return; liq_image_free_rgba_source(input_image); liq_image_free_maps(input_image); if (input_image->f_pixels) { input_image->free(input_image->f_pixels); } if (input_image->temp_row) { input_image->free(input_image->temp_row); } if (input_image->temp_f_row) { input_image->free(input_image->temp_f_row); } if (input_image->background) { liq_image_destroy(input_image->background); } input_image->magic_header = liq_freed_magic; input_image->free(input_image); } LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return NULL; } liq_histogram *hist = attr->malloc(sizeof(liq_histogram)); if (!hist) return NULL; *hist = (liq_histogram) { .magic_header = liq_histogram_magic, .malloc = attr->malloc, .free = attr->free, .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input), }; return hist; } LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return; hist->magic_header = liq_freed_magic; pam_freeacolorhash(hist->acht); hist->free(hist); } LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img) { liq_result *res; if (LIQ_OK != liq_image_quantize(img, attr, &res)) { return NULL; } return res; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!liq_image_has_rgba_pixels(img)) { return LIQ_UNSUPPORTED; } liq_histogram *hist = liq_histogram_create(attr); if (!hist) { return LIQ_OUT_OF_MEMORY; } liq_error err = liq_histogram_add_image(hist, attr, img); if (LIQ_OK != err) { return err; } err = 
liq_histogram_quantize_internal(hist, attr, false, result_output);
    liq_histogram_destroy(hist);
    return err;
}

// Public histogram quantization entry; fixed colors become fixed palette entries.
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output)
{
    return liq_histogram_quantize_internal(input_hist, attr, true, result_output);
}

// Validate handles, finalize the accumulated histogram, and run the quantizer.
// The finalized histogram is always freed; `input_hist` remains caller-owned.
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
    if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
    *result_output = NULL;
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (liq_progress(attr, 0)) return LIQ_ABORTED;
    histogram *hist;
    liq_error err = finalize_histogram(input_hist, attr, &hist);
    if (err != LIQ_OK) {
        return err;
    }
    err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
    pam_freeacolorhist(hist);
    return err;
}

// Set Floyd-Steinberg dithering strength (0..1); invalidates cached remapping.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
    res->dither_level = dither_level;
    return LIQ_OK;
}

// Snapshot the quantization result into a mutable remapping state
// (own copy of the palette, shared callbacks and settings).
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return NULL;
    }
    liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
    if (!res) return NULL;
    *res = (liq_remapping_result) {
        .magic_header = liq_remapping_result_magic,
        .malloc = result->malloc,
        .free = result->free,
        .dither_level = result->dither_level,
        .use_dither_map = result->use_dither_map,
        .palette_error = result->palette_error,
        .gamma = result->gamma,
        .palette = pam_duplicate_colormap(result->palette),
        .progress_callback = result->progress_callback,
        .progress_callback_user_info = result->progress_callback_user_info,
        .progress_stage1 = result->use_dither_map ? 20 : 0,
    };
    return res;
}

LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    return result->gamma;
}

LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
    if (result->palette) pam_freecolormap(result->palette);
    if (result->pixels) result->free(result->pixels);
    result->magic_header = liq_freed_magic;
    result->free(result);
}

// Destroy a result; cached int palettes are zeroed first so stale pointers
// handed out by liq_get_palette don't expose freed data.
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
    memset(&res->int_palette, 0, sizeof(liq_palette));
    if (res->remapping) {
        memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(res->remapping);
    }
    pam_freecolormap(res->palette);
    res->magic_header = liq_freed_magic;
    res->free(res);
}

// Error/quality getters: -1 when unknown or handle invalid.
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_standard_mse(result->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_standard_mse(result->remapping->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_quality(result->remapping->palette_error);
    }
    return -1;
}

// qsort comparator: most popular palette entries first.
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return v1 > v2 ? -1 : 1;
}

LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    if (!nelem) return;
    qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}

#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }

// Reorder the palette for PNG friendliness: transparent entries first (small
// tRNS chunk), opaque last, each group sorted by popularity.
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.f/256.f) {
                const unsigned int old = i, transparent_dest = map->colors-1;
                SWAP_PALETTE(map, transparent_dest, old);
                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }
    // Fixed colors (appended at the end) must keep their positions.
    unsigned int non_fixed_colors = 0;
    for(unsigned int i = 0; i < map->colors; i++) {
        if (map->palette[i].fixed) {
            break;
        }
        non_fixed_colors++;
    }
    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent = 0;
    for(unsigned int i = 0; i < non_fixed_colors; i++) {
        if (map->palette[i].acolor.a < 255.f/256.f) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--;
            }
            num_transparent++;
        }
    }
    liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)?
"y" : "ies");
    /* colors sorted by popularity make pngs slightly more compressible
     * opaque and transparent are sorted separately */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);
    if (non_fixed_colors > 9 && map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}

// Reduce a channel value to `bits` fewer significant bits, replicating the top
// bits into the low bits so the full 0..255 range is still reachable.
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
    return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}

// Convert the float palette to 8-bit entries (with optional posterization) and
// write the rounded values back so remapping sees exactly the output colors.
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma);
    dest->count = map->colors;
    for(unsigned int x = 0; x < map->colors; ++x) {
        rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);
        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);
        map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
        if (!px.a && !map->palette[x].fixed) {
            // Fully transparent non-fixed entries get a fixed RGB placeholder
            // (presumably to aid PNG compression of the palette — confirm).
            px.r = 71; px.g = 112; px.b = 76;
        }
        dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}

// Return the 8-bit palette, building and caching it on first call.
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
    if (result->remapping && result->remapping->int_palette.count) {
        return &result->remapping->int_palette;
    }
    if (!result->int_palette.count) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
    }
    return &result->int_palette;
}

// Undithered remap: each pixel gets its nearest palette index (parallel over
// rows, with per-thread k-means accumulators refining the palette as it goes).
// Returns the mean square error of the remap, or -1 on failure.
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    double remapping_error=0;
    if (!liq_image_get_row_f_init(input_image)) {
        return -1;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return -1;
    }
    const colormap_item *acolormap = map->palette;
    struct nearest_map *const n = nearest_init(map);
    // With a background, fully-transparent output pixels map to this index.
    const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;
    const unsigned int max_threads = omp_get_max_threads();
    LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
    kmeans_init(map, max_threads, average_color);
    int row;
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error)
    for(row = 0; row < rows; ++row) {
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;
        unsigned int last_match=0;
        for(unsigned int col = 0; col < cols; ++col) {
            float diff;
            // Seed the search with the previous match — neighbors are usually similar.
            last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
            // Prefer transparency when the background already matches at least as well.
            if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
                last_match = transparent_index;
            }
            output_pixels[row][col] = last_match;
            remapping_error += diff;
            kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
        }
    }
    kmeans_finalize(map, max_threads, average_color);
    nearest_free(n);
    return remapping_error / (input_image->width * input_image->height);
}

inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color.
*/ const float sr = thiserr.r * dither_level, sg = thiserr.g * dither_level, sb = thiserr.b * dither_level, sa = thiserr.a * dither_level; float ratio = 1.0; const float max_overflow = 1.1f; const float max_underflow = -0.1f; // allowing some overflow prevents undithered bands caused by clamping of all channels if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr); else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); } if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg); else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); } if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb); else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); } float a = px.a + sa; if (a > 1.f) { a = 1.f; } else if (a < 0) { a = 0; } // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! ;) const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa; if (dither_error > max_dither_error) { ratio *= 0.8f; } else if (dither_error < 2.f/256.f/256.f) { // don't dither areas that don't have noticeable error — makes file smaller return px; } return (f_pixel) { .r=px.r + sr * ratio, .g=px.g + sg * ratio, .b=px.b + sb * ratio, .a=a, }; } /** Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered. If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image. */ LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped) { const int rows = input_image->height, cols = input_image->width; const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? 
input_image->dither_map : input_image->edges) : NULL; const colormap *map = quant->palette; const colormap_item *acolormap = map->palette; if (!liq_image_get_row_f_init(input_image)) { return false; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return false; } /* Initialize Floyd-Steinberg error vectors. */ const size_t errwidth = cols+2; f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access if (!thiserr) return false; f_pixel *restrict nexterr = thiserr + errwidth; memset(thiserr, 0, errwidth * sizeof(thiserr[0])); bool ok = true; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; // response to this value is non-linear and without it any value < 0.8 would give almost no dithering float base_dithering_level = quant->dither_level; base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level); if (dither_map) { base_dithering_level *= 1.f/255.f; // convert byte to float } base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating int fs_direction = 1; unsigned int last_match=0; for (int row = 0; row < rows; ++row) { if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) { ok = false; break; } memset(nexterr, 0, errwidth * sizeof(nexterr[0])); int col = (fs_direction > 0) ? 0 : (cols - 1); const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? 
liq_image_get_row_f(input_image->background, row) : NULL;
        do {
            // Per-pixel dithering strength: modulated by the dither map so that
            // edges/noisy areas (low map values) receive less dithering.
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col];
            }

            // Apply accumulated Floyd-Steinberg error to the source pixel before searching.
            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);

            // Seed the nearest-color search with the previous match (or the already-remapped
            // output when available) to speed up the search.
            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            float diff;
            last_match = nearest_search(n, &spx, guessed_match, &diff);
            f_pixel output_px = acolormap[last_match].acolor;
            // If the background pixel matches at least as well as the palette entry,
            // emit the transparent index and diffuse error relative to the background.
            if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
                output_px = bg_pixels[col];
                output_pixels[row][col] = transparent_index;
            } else {
                output_pixels[row][col] = last_match;
            }

            // Quantization error to be diffused to neighboring pixels.
            f_pixel err = {
                .r = (spx.r - output_px.r),
                .g = (spx.g - output_px.g),
                .b = (spx.b - output_px.b),
                .a = (spx.a - output_px.a),
            };

            // If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                err.r *= 0.75f;
                err.g *= 0.75f;
                err.b *= 0.75f;
                err.a *= 0.75f;
            }

            /* Propagate Floyd-Steinberg error terms.
*/ if (fs_direction > 0) { thiserr[col + 2].a += err.a * (7.f/16.f); thiserr[col + 2].r += err.r * (7.f/16.f); thiserr[col + 2].g += err.g * (7.f/16.f); thiserr[col + 2].b += err.b * (7.f/16.f); nexterr[col + 2].a = err.a * (1.f/16.f); nexterr[col + 2].r = err.r * (1.f/16.f); nexterr[col + 2].g = err.g * (1.f/16.f); nexterr[col + 2].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col ].a += err.a * (3.f/16.f); nexterr[col ].r += err.r * (3.f/16.f); nexterr[col ].g += err.g * (3.f/16.f); nexterr[col ].b += err.b * (3.f/16.f); } else { thiserr[col ].a += err.a * (7.f/16.f); thiserr[col ].r += err.r * (7.f/16.f); thiserr[col ].g += err.g * (7.f/16.f); thiserr[col ].b += err.b * (7.f/16.f); nexterr[col ].a = err.a * (1.f/16.f); nexterr[col ].r = err.r * (1.f/16.f); nexterr[col ].g = err.g * (1.f/16.f); nexterr[col ].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col + 2].a += err.a * (3.f/16.f); nexterr[col + 2].r += err.r * (3.f/16.f); nexterr[col + 2].g += err.g * (3.f/16.f); nexterr[col + 2].b += err.b * (3.f/16.f); } // remapping is done in zig-zag col += fs_direction; if (fs_direction > 0) { if (col >= cols) break; } else { if (col < 0) break; } } while(1); f_pixel *const temperr = thiserr; thiserr = nexterr; nexterr = temperr; fs_direction = -fs_direction; } input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped nearest_free(n); return ok; } /* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */ LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse) { const float 
max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
    if (fixed_colors_count) {
        for(int j=0; j < hist->size; j++) {
            for(unsigned int i=0; i < fixed_colors_count; i++) {
                if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
                    // remove color from histogram by overwriting with the last entry
                    hist->achv[j] = hist->achv[--hist->size];
                    j--; // re-examine slot j, which now holds the entry swapped in from the end
                    break; // continue searching histogram
                }
            }
        }
    }
}

/**
 * Adds explicit (color, count) entries to a histogram without scanning an image.
 * Validates all pointers/ranges, then hashes each entry into the accumulating
 * color hash. Returns LIQ_OK or an error code; never takes ownership of `entries`.
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
    if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;

    // Explicit entries can't be combined with a histogram that already lost
    // precision to ignorebits posterization.
    if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
        return LIQ_UNSUPPORTED;
    }
    input_hist->ignorebits = 0;

    input_hist->had_image_added = true;
    input_hist->gamma = gamma ? gamma : 0.45455; // 0.45455 ~= 1/2.2, default sRGB-ish gamma

    if (!input_hist->acht) {
        // NOTE(review): num_entries*num_entries is an int multiplication used as a hash
        // size estimate; for num_entries > ~46340 it overflows int — confirm upstream intent.
        input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
        if (!input_hist->acht) {
            return LIQ_OUT_OF_MEMORY;
        }
    }
    // Fake image size. It's only for hash size estimates.
if (!input_hist->acht->cols) { input_hist->acht->cols = num_entries; } input_hist->acht->rows += num_entries; const unsigned int hash_size = input_hist->acht->hash_size; for(int i=0; i < num_entries; i++) { const rgba_pixel rgba = { .r = entries[i].color.r, .g = entries[i].color.g, .b = entries[i].color.b, .a = entries[i].color.a, }; union rgba_as_int px = {rgba}; unsigned int hash; if (px.rgba.a) { hash = px.l % hash_size; } else { hash=0; px.l=0; } if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) { return LIQ_OUT_OF_MEMORY; } } return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER; const unsigned int cols = input_image->width, rows = input_image->height; if (!input_image->importance_map && options->use_contrast_maps) { contrast_maps(input_image); } input_hist->gamma = input_image->gamma; for(int i = 0; i < input_image->fixed_colors_count; i++) { liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]); if (res != LIQ_OK) { return res; } } /* ** Step 2: attempt to make a histogram of the colors, unclustered. ** If at first we don't succeed, increase ignorebits to increase color ** coherence and try again. */ if (liq_progress(options, options->progress_stage1 * 0.4f)) { return LIQ_ABORTED; } const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image); // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not // the first image added const unsigned int max_histogram_entries = input_hist->had_image_added ? 
~0 : options->max_histogram_entries; do { if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free); } if (!input_hist->acht) return LIQ_OUT_OF_MEMORY; // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important. // noise map does not include edges to avoid ruining anti-aliasing for(unsigned int row=0; row < rows; row++) { bool added_ok; if (all_rows_at_once) { added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map); if (added_ok) break; } else { const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) }; added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL); } if (!added_ok) { input_hist->ignorebits++; liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... 
%d", input_hist->ignorebits);
                pam_freeacolorhash(input_hist->acht);
                input_hist->acht = NULL;
                if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
                break; // restart the whole image scan with coarser (ignorebits) colors
            }
        }
    } while(!input_hist->acht);

    input_hist->had_image_added = true;

    liq_image_free_importance_map(input_image);

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    return LIQ_OK;
}

/**
 * Converts the accumulated color hash into a final histogram (freeing the hash),
 * and removes colors already covered by fixed palette entries.
 * Returns LIQ_OK, LIQ_ABORTED (progress callback), LIQ_BITMAP_NOT_AVAILABLE
 * (nothing was added), or LIQ_OUT_OF_MEMORY.
 */
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL; // hash is consumed; prevent double free

    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}

LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) *
(1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val); al *= 256.f; row_pixels[col].a = al >= 255.f ? 255 : al; } } } /** Builds two maps: importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy. edges - noise map including all edges */ LIQ_NONNULL static void contrast_maps(liq_image *image) { const unsigned int cols = image->width, rows = image->height; if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) { return; } unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows); image->importance_map = NULL; unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows); image->edges = NULL; unsigned char *restrict tmp = image->malloc(cols*rows); if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) { image->free(noise); image->free(edges); image->free(tmp); return; } const f_pixel *curr_row, *prev_row, *next_row; curr_row = prev_row = next_row = liq_image_get_row_f(image, 0); for (unsigned int j=0; j < rows; j++) { prev_row = curr_row; curr_row = next_row; next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); f_pixel prev, curr = curr_row[0], next=curr; for (unsigned int i=0; i < cols; i++) { prev=curr; curr=next; next = curr_row[MIN(cols-1,i+1)]; // contrast is difference between pixels neighbouring horizontally and vertically const float a = fabsf(prev.a+next.a - curr.a*2.f), r = fabsf(prev.r+next.r - curr.r*2.f), g = fabsf(prev.g+next.g - curr.g*2.f), b = fabsf(prev.b+next.b - curr.b*2.f); const f_pixel prevl = prev_row[i]; const f_pixel nextl = next_row[i]; const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f), r1 = fabsf(prevl.r+nextl.r - curr.r*2.f), g1 = fabsf(prevl.g+nextl.g - curr.g*2.f), b1 = fabsf(prevl.b+nextl.b - curr.b*2.f); const float horiz = MAX(MAX(a,r),MAX(g,b)); const float vert = MAX(MAX(a1,r1),MAX(g1,b1)); const float edge = MAX(horiz,vert); float z = edge - fabsf(horiz-vert)*.5f; z = 1.f - 
MAX(z,MIN(horiz,vert)); z *= z; // noise is amplified z *= z; // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely). const unsigned int z_int = 85 + (unsigned int)(z * 171.f); noise[j*cols+i] = MIN(z_int, 255); const int e_int = 255 - (int)(edge * 256.f); edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0; } } // noise areas are shrunk and then expanded to remove thin edges from the map liq_max3(noise, tmp, cols, rows); liq_max3(tmp, noise, cols, rows); liq_blur(noise, tmp, noise, cols, rows, 3); liq_max3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(edges, tmp, cols, rows); liq_max3(tmp, edges, cols, rows); for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]); image->free(tmp); image->importance_map = noise; image->edges = edges; } /** * Builds map of neighbor pixels mapped to the same palette entry * * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly. * Correct flood fill doesn't have visually good properties. */ LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map) { const unsigned int width = input_image->width; const unsigned int height = input_image->height; unsigned char *const edges = input_image->edges; for(unsigned int row=0; row < height; row++) { unsigned char lastpixel = row_pointers[row][0]; unsigned int lastcol=0; for(unsigned int col=1; col < width; col++) { const unsigned char px = row_pointers[row][col]; if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) { // Transparency may or may not create an edge. When there's an explicit background set, assume no edge. 
continue;
            }
            if (px != lastpixel || col == width-1) {
                // Weight a run of identical pixels by its length (x10) plus
                // matching pixels directly above/below (x15 each).
                int neighbor_count = 10 * (col-lastcol);
                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }
                // Rescale edge strength over the run: large flat neighborhoods
                // keep more ditherability, isolated pixels keep less.
                while(lastcol <= col) {
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // The edges buffer has been rewritten in place into a dither map; hand it over.
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}

/**
 * Palette can be NULL, in which case it creates a new palette from scratch.
 * Copies up to max_colors - fixed_colors_count entries from the input palette,
 * appends the fixed colors (marked .fixed = true), frees the input palette,
 * and returns the new one. Returns NULL only if pam_colormap fails upstream.
 */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);

    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}

// Histogram-weight feedback: boosts the adjusted weight of entries that remapped
// poorly (large diff) so the next mediancut pass favors them.
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}

/**
 Repeats mediancut with different histogram weights to find palette with minimum error.

 feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/ static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p) { unsigned int max_colors = options->max_colors; // if output is posterized it doesn't make sense to aim for perfrect colors, so increase target_mse // at this point actual gamma is not set, so very conservative posterization estimate is used const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2))); int feedback_loop_trials = options->feedback_loop_trials; if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} colormap *acolormap = NULL; double least_error = MAX_DIFF; double target_mse_overshoot = feedback_loop_trials>0 ? 
1.05 : 1.0; const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1); do { colormap *newmap; if (hist->size && fixed_colors_count < max_colors) { newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2, options->malloc, options->free); } else { feedback_loop_trials = 0; newmap = NULL; } newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free); if (!newmap) { return NULL; } if (feedback_loop_trials <= 0) { return newmap; } // after palette has been created, total error (MSE) is calculated to keep the best palette // at the same time K-Means iteration is done to improve the palette // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors const bool first_run_of_target_mse = !acolormap && target_mse > 0; double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? 
NULL : adjust_histogram_callback); // goal is to increase quality or to reduce number of colors used if quality is good enough if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) { if (acolormap) pam_freecolormap(acolormap); acolormap = newmap; if (total_error < target_mse && total_error > 0) { // K-Means iteration improves quality above what mediancut aims for // this compensates for it, making mediancut aim for worse target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error); } least_error = total_error; // if number of colors could be reduced, try to keep it that way // but allow extra color as a bit of wiggle room in case quality can be improved too max_colors = MIN(newmap->colors+1, max_colors); feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever } else { for(unsigned int j=0; j < hist->size; j++) { hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0; } target_mse_overshoot = 1.0; feedback_loop_trials -= 6; // if error is really bad, it's unlikely to improve, so end sooner if (total_error > least_error*4) feedback_loop_trials -= 3; pam_freecolormap(newmap); } float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials); if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break; liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done)); } while(feedback_loop_trials > 0); *palette_error_p = least_error; return acolormap; } static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) { if (!hist->size) { return NULL; } colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free); for(unsigned int i=0; i < hist->size; i++) { acolormap->palette[i].acolor = hist->achv[i].acolor; acolormap->palette[i].popularity = hist->achv[i].perceptual_weight; } return acolormap; } LIQ_NONNULL static 
liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output) { colormap *acolormap; double palette_error = -1; assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1)); const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors; if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED; // If image has few colors to begin with (and no quality degradation is required) // then it's possible to skip quantization entirely if (few_input_colors && options->target_mse == 0) { acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free); palette_error = 0; } else { const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error); if (!acolormap) { return LIQ_VALUE_OUT_OF_RANGE; } // K-Means iteration approaches local minimum for the palette double iteration_limit = options->kmeans_iteration_limit; unsigned int iterations = options->kmeans_iterations; if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work if (iterations) { // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) { if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) { hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway } } if (hist->size > 5000) {iterations 
= (iterations*3 + 3)/4;} if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;} if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;} if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;} verbose_print(options, " moving colormap towards local minimum"); double previous_palette_error = MAX_DIFF; for(unsigned int i=0; i < iterations; i++) { palette_error = kmeans_do_iteration(hist, acolormap, NULL); if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) { break; } if (fabs(previous_palette_error-palette_error) < iteration_limit) { break; } if (palette_error > max_mse*1.5) { // probably hopeless if (palette_error > max_mse*3.0) break; // definitely hopeless i++; } previous_palette_error = palette_error; } } if (palette_error > max_mse) { liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)", mse_to_standard_mse(palette_error), mse_to_quality(palette_error), mse_to_standard_mse(max_mse), mse_to_quality(max_mse)); pam_freecolormap(acolormap); return LIQ_QUALITY_TOO_LOW; } } if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) { pam_freecolormap(acolormap); return LIQ_ABORTED; } sort_palette(acolormap, options); // If palette was created from a multi-image histogram, // then it shouldn't be optimized for one image during remapping if (fixed_result_colors) { for(unsigned int i=0; i < acolormap->colors; i++) { acolormap->palette[i].fixed = true; } } liq_result *result = options->malloc(sizeof(liq_result)); if (!result) return LIQ_OUT_OF_MEMORY; *result = (liq_result){ .magic_header = liq_result_magic, .malloc = options->malloc, .free = options->free, .palette = acolormap, .palette_error = palette_error, .use_dither_map = options->use_dither_map, .gamma = gamma, .min_posterization_output = options->min_posterization_output, }; *result_output = result; 
return LIQ_OK;
}

/**
 * Public API: remaps input_image into a caller-supplied contiguous buffer,
 * one palette index byte per pixel. Builds per-row pointers into the buffer
 * and delegates to liq_write_remapped_image_rows().
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    // size_t cast prevents 32-bit overflow of width*height
    const size_t required_size = (size_t)input_image->width * (size_t)input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    LIQ_ARRAY(unsigned char *, rows, input_image->height);
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

/**
 * Public API: remaps input_image into caller-supplied per-row buffers.
 * Replaces any previous remapping result on `quant`, builds contrast maps
 * when dithering with a dither map is enabled, then remaps (with or without
 * Floyd-Steinberg dithering in the continuation of this function).
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // Only one remapping result is kept per liq_result; discard the old one.
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }
    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    // Dither map needs edge information; compute it lazily if missing.
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
*/ float remapping_error = result->palette_error; if (result->dither_level == 0) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output); remapping_error = remap_to_palette(input_image, row_pointers, result->palette); } else { const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000; const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map); const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map); if (generate_dither_map) { // If dithering (with dither map) is required, this image is used to find areas that require dithering remapping_error = remap_to_palette(input_image, row_pointers, result->palette); update_dither_map(input_image, row_pointers, result->palette); } if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) { return LIQ_ABORTED; } // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output); if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) { return LIQ_ABORTED; } } // remapping error from dithered image is absurd, so always non-dithered value is used // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim // so that should be used when possible. if (result->palette_error < 0) { result->palette_error = remapping_error; } return LIQ_OK; } LIQ_EXPORT int liq_version() { return LIQ_VERSION; }
// ===== Metric.h — start of a second, concatenated C++ header file =====
// // Created by Jin Zhu on 2020/2/18. // // #define R_BUILD #ifndef SRC_METRICS_H #define SRC_METRICS_H #include "Data.h" #include "Algorithm.h" #include "model_fit.h" // #include "path.h" #include <vector> #include <random> #include <algorithm> #include "utilities.h" template <class T1, class T2, class T3, class T4> // To do: calculate loss && all to one && lm poisson cox class Metric { public: bool is_cv; int Kfold; int ic_type; // Eigen::Matrix<T2, Dynamic, 1> cv_initial_model_param; // Eigen::Matrix<T3, Dynamic, 1> cv_initial_coef0; std::vector<Eigen::VectorXi> cv_initial_A; std::vector<Eigen::VectorXi> cv_initial_I; std::vector<Eigen::VectorXi> train_mask_list; std::vector<Eigen::VectorXi> test_mask_list; std::vector<T4> train_X_list; std::vector<T4> test_X_list; std::vector<T1> train_y_list; std::vector<T1> test_y_list; std::vector<Eigen::VectorXd> train_weight_list; std::vector<Eigen::VectorXd> test_weight_list; std::vector<FIT_ARG<T2, T3>> cv_init_fit_arg; // std::vector<std::vector<T4>> group_XTX_list; double ic_coef; Metric() = default; Metric(int ic_type, double ic_coef = 1.0, bool is_cv = false, int Kfold = 5) { this->is_cv = is_cv; this->ic_type = ic_type; this->Kfold = Kfold; this->ic_coef = ic_coef; if (is_cv) { cv_init_fit_arg.resize(Kfold); train_X_list.resize(Kfold); test_X_list.resize(Kfold); train_y_list.resize(Kfold); test_y_list.resize(Kfold); test_weight_list.resize(Kfold); train_weight_list.resize(Kfold); } }; void set_cv_init_fit_arg(int p, int M) { for (int i = 0; i < this->Kfold; i++) { T2 beta_init; T3 coef0_init; coef_set_zero(p, M, beta_init, coef0_init); Eigen::VectorXi A_init; Eigen::VectorXd bd_init; FIT_ARG<T2, T3> fit_arg(0, 0., beta_init, coef0_init, bd_init, A_init); cv_init_fit_arg[i] = fit_arg; } } // void set_cv_initial_model_param(int Kfold, int p) // { // this->cv_initial_model_param = Eigen::MatrixXd::Zero(p, Kfold); // }; // void set_cv_initial_A(int Kfold, int p) // { // vector<Eigen::VectorXi> tmp(Kfold); // 
this->cv_initial_A = tmp; // }; // void set_cv_initial_coef0(int Kfold, int p) // { // vector<double> tmp(Kfold); // for (int i = 0; i < Kfold; i++) // tmp[i] = 0; // this->cv_initial_coef0 = tmp; // }; // void update_cv_initial_model_param(Eigen::VectorXd model_param, int k) // { // this->cv_initial_model_param.col(k) = model_param; // } // void update_cv_initial_A(Eigen::VectorXi A, int k) // { // this->cv_initial_A[k] = A; // } // void update_cv_initial_coef0(double coef0, int k) // { // this->cv_initial_coef0[k] = coef0; // } void set_cv_train_test_mask(Data<T1, T2, T3, T4> &data, int n, Eigen::VectorXi &cv_fold_id) { Eigen::VectorXi index_list(n); std::vector<int> index_vec((unsigned int)n); std::vector<Eigen::VectorXi> group_list((unsigned int)this->Kfold); for (int i = 0; i < n; i++) { index_vec[i] = i; } if (cv_fold_id.size() == 0){ // std::random_device rd; std::mt19937 g(123); std::shuffle(index_vec.begin(), index_vec.end(), g); for (int i = 0; i < n; i++) { index_list(i) = index_vec[i]; } Eigen::VectorXd loss_list(this->Kfold); int group_size = int(n / this->Kfold); for (int k = 0; k < (this->Kfold - 1); k++) { group_list[k] = index_list.segment(int(k * group_size), group_size); } group_list[this->Kfold - 1] = index_list.segment(int((this->Kfold - 1) * group_size), n - int(int(this->Kfold - 1) * group_size)); }else{ // given cv_fold_id auto rule = [cv_fold_id](int i, int j) -> bool { return cv_fold_id(i) < cv_fold_id(j); }; std::sort(index_vec.begin(), index_vec.end(), rule); for (int i = 0; i < n; i++) { index_list(i) = index_vec[i]; } int k = 0, st = 0, ed = 1; while (k < this->Kfold && ed < n){ int mask = cv_fold_id(index_list(st)); while (ed < n && mask == cv_fold_id(index_list(ed))) ed++; group_list[k] = index_list.segment(st, ed - st); st = ed; ed++; k++; } } for (int k = 0; k < this->Kfold; k++) { std::sort(group_list[k].data(), group_list[k].data() + group_list[k].size()); } // cv train-test partition: std::vector<Eigen::VectorXi> 
train_mask_list_tmp((unsigned int)this->Kfold); std::vector<Eigen::VectorXi> test_mask_list_tmp((unsigned int)this->Kfold); for (int k = 0; k < this->Kfold; k++) { int train_x_size = n - group_list[k].size(); // get train_mask Eigen::VectorXi train_mask(train_x_size); int i = 0; for (int j = 0; j < this->Kfold; j++) { if (j != k) { for (int s = 0; s < group_list[j].size(); s++) { train_mask(i) = group_list[j](s); i++; } } } std::sort(train_mask.data(), train_mask.data() + train_mask.size()); train_mask_list_tmp[k] = train_mask; test_mask_list_tmp[k] = group_list[k]; slice(data.x, train_mask, this->train_X_list[k]); slice(data.x, group_list[k], this->test_X_list[k]); slice(data.y, train_mask, this->train_y_list[k]); slice(data.y, group_list[k], this->test_y_list[k]); slice(data.weight, train_mask, this->train_weight_list[k]); slice(data.weight, group_list[k], this->test_weight_list[k]); } this->train_mask_list = train_mask_list_tmp; this->test_mask_list = test_mask_list_tmp; }; // void cal_cv_group_XTX(Data<T1, T2, T3> &data) // { // int p = data.p; // Eigen::VectorXi index = data.g_index; // Eigen::VectorXi gsize = data.g_size; // int N = data.g_num; // std::vector<std::vector<Eigen::MatrixXd>> group_XTX_list_tmp(this->Kfold); // for (int k = 0; k < this->Kfold; k++) // { // int train_size = this->train_mask_list[k].size(); // Eigen::MatrixXd train_x(train_size, p); // for (int i = 0; i < train_size; i++) // { // train_x.row(i) = data.x.row(this->train_mask_list[k](i)); // }; // group_XTX_list_tmp[k] = group_XTX(train_x, index, gsize, train_size, p, N, 1); // } // this->group_XTX_list = group_XTX_list_tmp; // } double ic(int train_n, int M, int N, Algorithm<T1, T2, T3, T4> *algorithm) { double loss; if (algorithm->model_type == 1 || algorithm->model_type == 5) { loss = train_n * log(algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()); } else { loss = 2 * (algorithm->get_train_loss() - algorithm->lambda_level * 
algorithm->beta.cwiseAbs2().sum()); } if (ic_type == 1) { return loss + 2.0 * algorithm->get_effective_number(); } else if (ic_type == 2) { return loss + this->ic_coef * (double(train_n)) * algorithm->get_effective_number(); } else if (ic_type == 3) { return loss + this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number(); } else if (ic_type == 4) { return loss + this->ic_coef * (log(double(train_n)) + 2 * log(double(N))) * algorithm->get_effective_number(); } else return 0; }; double neg_loglik_loss(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int train_n, int p, int N, Algorithm<T1, T2, T3, T4> *algorithm) { Eigen::VectorXi A = algorithm->get_A_out(); T2 beta = algorithm->get_beta(); T3 coef0 = algorithm->get_coef0(); Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, p, N); T4 X_A = X_seg(train_x, train_n, A_ind); T2 beta_A; slice(beta, A_ind, beta_A); // Eigen::VectorXd beta_A(A_ind.size()); // for (int k = 0; k < A_ind.size(); k++) // { // beta_A(k) = beta(A_ind(k)); // } double L0 = algorithm->neg_loglik_loss(X_A, train_y, train_weight, beta_A, coef0, A, g_index, g_size, 0.0); return L0; } // to do double fit_and_evaluate_in_metric(Algorithm<T1, T2, T3, T4> *algorithm, Data<T1, T2, T3, T4> &data, std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list, FIT_ARG<T2, T3> &fit_arg) { int N = data.g_num; algorithm->update_sparsity_level(fit_arg.support_size); algorithm->update_lambda_level(fit_arg.lambda); algorithm->update_beta_init(fit_arg.beta_init); algorithm->update_bd_init(fit_arg.bd_init); algorithm->update_coef0_init(fit_arg.coef0_init); algorithm->update_A_init(fit_arg.A_init, N); algorithm->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p, data.g_num, algorithm->Sigma); if (algorithm->get_warm_start()) { fit_arg.beta_init = algorithm->get_beta(); fit_arg.coef0_init = algorithm->get_coef0(); fit_arg.bd_init = algorithm->get_bd(); 
} if (is_cv) { Eigen::VectorXi g_index = data.g_index; Eigen::VectorXi g_size = data.g_size; int p = data.p; int N = data.g_num; Eigen::VectorXd loss_list(this->Kfold); #pragma omp parallel for ///////////////////////parallel///////////////////////// for (int k = 0; k < this->Kfold; k++) { //get test_x, test_y int test_n = this->test_mask_list[k].size(); int train_n = this->train_mask_list[k].size(); // train & test data // Eigen::MatrixXd train_x = matrix_slice(data.x, this->train_mask_list[k], 0); // Eigen::MatrixXd test_x = matrix_slice(data.x, this->test_mask_list[k], 0); // Eigen::VectorXd train_y = vector_slice(data.y, this->train_mask_list[k]); // Eigen::VectorXd test_y = vector_slice(data.y, this->test_mask_list[k]); // Eigen::VectorXd train_weight = vector_slice(data.weight, this->train_mask_list[k]); // Eigen::VectorXd test_weight = vector_slice(data.weight, this->test_mask_list[k]); // Eigen::VectorXd beta_init; algorithm_list[k]->update_sparsity_level(fit_arg.support_size); algorithm_list[k]->update_lambda_level(fit_arg.lambda); if (algorithm_list[k]->get_warm_start()) { algorithm_list[k]->update_beta_init(this->cv_init_fit_arg[k].beta_init); algorithm_list[k]->update_bd_init(this->cv_init_fit_arg[k].bd_init); algorithm_list[k]->update_coef0_init(this->cv_init_fit_arg[k].coef0_init); algorithm_list[k]->update_A_init(this->cv_init_fit_arg[k].A_init, N); // beta_init = this->cv_initial_model_param.col(k).eval(); // algorithm->update_beta_init(beta_init); // algorithm->update_coef0_init(this->cv_initial_coef0[k]); // algorithm->update_A_init(this->cv_initial_A[k], N); } // algorithm->update_train_mask(this->train_mask_list[k]); /// ?????????????????????????????????????????????????????????????? 
algorithm_list[k]->fit(this->train_X_list[k], this->train_y_list[k], this->train_weight_list[k], g_index, g_size, train_n, p, N, algorithm_list[k]->Sigma); if (algorithm_list[k]->get_warm_start()) { this->cv_init_fit_arg[k].beta_init = algorithm->get_beta(); this->cv_init_fit_arg[k].coef0_init = algorithm->get_coef0(); this->cv_init_fit_arg[k].bd_init = algorithm->get_bd(); // this->update_cv_initial_model_param(algorithm->get_beta(), k); // this->update_cv_initial_A(algorithm->get_A_out(), k); // this->update_cv_initial_coef0(algorithm->get_coef0(), k); } loss_list(k) = this->neg_loglik_loss(this->test_X_list[k], this->test_y_list[k], this->test_weight_list[k], g_index, g_size, test_n, p, N, algorithm_list[k]); } return loss_list.mean(); } else { return this->ic(data.n, data.M, data.g_num, algorithm); } }; }; #endif //SRC_METRICS_H
NAL.c
/* * The MIT License * * Copyright 2020 The OpenNARS authors. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/

#include "NAL.h"

/* Running label counter: each generated rule gets a RULE_<n> goto target. */
int ruleID = 0;

/* Emit C code that unifies position i of premise term `premiseIndex` against
 * meta-rule atom `atom` (uppercase atom names act as variables; anything else
 * is a structural copula constraint). */
static void NAL_GeneratePremisesUnifier(int i, Atom atom, int premiseIndex)
{
    if(atom)
    {
        //upper case atoms are treated as variables in the meta rule language
        if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
        {
            //unification failure by inequal value assignment (value at position i versus previously assigned one), and variable binding
            printf("subtree = Term_ExtractSubterm(&term%d, %d);\n", premiseIndex, i);
            printf("if((substitutions[%d].atoms[0]!=0 && !Term_Equal(&substitutions[%d], &subtree)) || Narsese_copulaEquals(subtree.atoms[0], '@')){ goto RULE_%d; }\n", atom, atom, ruleID);
            printf("substitutions[%d] = subtree;\n", atom);
        }
        else
        {
            //structural constraint given by copulas at position i
            printf("if(term%d.atoms[%d] != %d){ goto RULE_%d; }\n", premiseIndex, i, atom, ruleID);
        }
    }
}

/* Emit C code that writes position i of the conclusion term: variables are
 * substituted from the bindings collected during unification; copulas are
 * copied verbatim from the meta rule. */
static void NAL_GenerateConclusionSubstitution(int i, Atom atom)
{
    if(atom)
    {
        if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
        {
            //conclusion term gets variables substituted
            printf("if(!Term_OverrideSubterm(&conclusion,%d,&substitutions[%d])){ goto RULE_%d; }\n", i, atom, ruleID);
        }
        else
        {
            //conclusion term inherits structure from meta rule, namely the copula
            printf("conclusion.atoms[%d] = %d;\n", i, atom);
        }
    }
}

/* Emit the body of one rule: the RULE_<n> label (incrementing ruleID), the
 * single/double-premise guard, premise unification and conclusion
 * construction. premise2 may be NULL when doublePremise is false. */
static void NAL_GenerateConclusionTerm(char *premise1, char *premise2, char* conclusion, bool doublePremise)
{
    Term term1 = Narsese_Term(premise1);
    Term term2 = doublePremise ? Narsese_Term(premise2) : (Term) {0};
    Term conclusion_term = Narsese_Term(conclusion);
    printf("RULE_%d:\n{\n", ruleID++);
    //skip double/single premise rule if single/double premise
    if(doublePremise) { printf("if(!doublePremise) { goto RULE_%d; }\n", ruleID); }
    if(!doublePremise) { printf("if(doublePremise) { goto RULE_%d; }\n", ruleID); }
    puts("Term substitutions[27+NUM_ELEMENTS(Narsese_RuleTableVars)] = {0}; Term subtree = {0};"); //27 because of 9 indep, 9 dep, 9 query vars
    for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
    {
        NAL_GeneratePremisesUnifier(i, term1.atoms[i], 1);
    }
    if(doublePremise)
    {
        for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
        {
            NAL_GeneratePremisesUnifier(i, term2.atoms[i], 2);
        }
    }
    puts("Term conclusion = {0};");
    for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
    {
        NAL_GenerateConclusionSubstitution(i, conclusion_term.atoms[i]);
    }
}

/* Emit a full inference rule: conclusion construction plus the truth-function
 * call (argument order optionally swapped) and the derived-event handoff. */
static void NAL_GenerateRule(char *premise1, char *premise2, char* conclusion, char* truthFunction, bool doublePremise, bool switchTruthArgs)
{
    NAL_GenerateConclusionTerm(premise1, premise2, conclusion, doublePremise);
    if(switchTruthArgs)
    {
        printf("Truth conclusionTruth = %s(truth2,truth1);\n", truthFunction);
    }
    else
    {
        printf("Truth conclusionTruth = %s(truth1,truth2);\n", truthFunction);
    }
    puts("NAL_DerivedEvent(RuleTable_Reduce(conclusion, false), conclusionOccurrence, conclusionTruth, conclusionStamp, currentTime, parentPriority, conceptPriority, occurrenceTimeOffset, validation_concept, validation_cid);}\n");
}

/* Emit a term-reduction rule (single premise, no truth function): the reduced
 * term is returned from RuleTable_Reduce instead of deriving an event. */
static void NAL_GenerateReduction(char *premise1, char* conclusion)
{
    NAL_GenerateConclusionTerm(premise1, NULL, conclusion, false);
    puts("IN_DEBUG( fputs(\"Reduced: \", stdout); Narsese_PrintTerm(&term1); fputs(\" -> \", stdout); Narsese_PrintTerm(&conclusion); puts(\"\"); ) \nreturn conclusion;\n}");
}

/* Print the complete generated RuleTable.c to stdout: RuleTable_Apply from the
 * H_NAL_RULES section of NAL.h, then RuleTable_Reduce from H_NAL_REDUCTIONS.
 * The re-#includes of NAL.h expand the rule macro lists with the generator
 * functions above. */
void NAL_GenerateRuleTable()
{
    puts("#include \"RuleTable.h\"");
    puts("void RuleTable_Apply(Term term1, Term term2, Truth truth1, Truth truth2, long conclusionOccurrence, long occurrenceTimeOffset, Stamp conclusionStamp, long currentTime, double parentPriority, double conceptPriority, bool doublePremise, Concept *validation_concept, long validation_cid)\n{\ngoto RULE_0;");
#define H_NAL_RULES
#include "NAL.h"
#undef H_NAL_RULES
    printf("RULE_%d:;\n}\n", ruleID);
    printf("Term RuleTable_Reduce(Term term1, bool doublePremise)\n{\ngoto RULE_%d;\n", ruleID);
#define H_NAL_REDUCTIONS
#include "NAL.h"
#undef H_NAL_REDUCTIONS
    printf("RULE_%d:;\nreturn term1;\n}\n\n", ruleID);
}

/* Runtime hook called by the generated rule code: wrap the conclusion in a
 * belief event and add it to memory, unless the validating concept was
 * recycled in the meantime. Priority is conceptPriority*parentPriority scaled
 * by the truth expectation. */
void NAL_DerivedEvent(Term conclusionTerm, long conclusionOccurrence, Truth conclusionTruth, Stamp stamp, long currentTime, double parentPriority, double conceptPriority, long occurrenceTimeOffset, Concept *validation_concept, long validation_cid)
{
    Event e = { .term = conclusionTerm,
                .type = EVENT_TYPE_BELIEF,
                .truth = conclusionTruth,
                .stamp = stamp,
                .occurrenceTime = conclusionOccurrence ,
                .creationTime = currentTime };
    #pragma omp critical(Memory)
    {
        if(validation_concept == NULL || validation_concept->id == validation_cid) //concept recycling would invalidate the derivation (allows to lock only adding results to memory)
        {
            Memory_AddEvent(&e, currentTime, conceptPriority*parentPriority*Truth_Expectation(conclusionTruth), occurrenceTimeOffset, false, true, false, false, false);
        }
    }
}
GB_unaryop__minv_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated file — change the Generator templates, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_fp32
// op(A') function:  GB_tran__minv_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = minv (uint64) (cast (Ax [p])) over anz entries,
// parallelized across nthreads.
GrB_Info GB_unop__minv_uint64_fp32
(
    uint64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the shared GB_unaryop_transpose.c template,
// specialized via the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated file — change the Generator templates, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint64)
// C=scalar+B                       GB (_bind1st__bshift_uint64)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint64)
// C=A+scalar                       GB (_bind2nd__bshift_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint64)

// C type:     uint64_t
// A type:     uint64_t
// A pattern?  0
// B type:     int8_t
// B pattern?  0

// BinaryOp:   cij = GB_bitshift_uint64 (aij, bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint64 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for this operator (guarded out with #if 0).
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Not generated for this operator (guarded out with #if 0).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Not generated for this operator (guarded out with #if 0).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta fill in for entries missing from A or B
    uint64_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_bitshift_uint64 (x, aij) ;     \
}

GrB_Info GB (_bind1st_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_bitshift_uint64 (aij, y) ;     \
}

GrB_Info GB (_bind2nd_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== begin blackscholes.c ==== */
// SECURITY NOTE(review): this include and the malicious_*() calls below are
// NOT part of the original PARSEC blackscholes benchmark and appear to be
// injected code.  Flagged here rather than silently removed -- confirm
// provenance and delete both the header and every malicious_*() call.
#include "BullMoose_4.h"

// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice Hall, John C. Hull,

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stdint.h> // uintptr_t for cache-line alignment arithmetic

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

#define ENABLE_THREADS 1

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif
#include <pthread.h>
#include <time.h>

#define MAX_THREADS 128

pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;
#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif

// Precision to use for calculations
#define fptype float

#define NUM_RUNS 1

// One European option: market parameters plus the DerivaGem reference price
// used by the optional ERR_CHK verification path.
typedef struct OptionData_ {
  fptype s;        // spot price
  fptype strike;   // strike price
  fptype r;        // risk-free interest rate
  fptype divq;     // dividend rate
  fptype v;        // volatility
  fptype t;        // time to maturity or option expiration in years
                   // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
  char OptionType; // Option type. "P"=PUT, "C"=CALL
  fptype divs;     // dividend vals (not used in this test)
  fptype DGrefval; // DerivaGem Reference Value
} OptionData;

// Global working set: `data` holds the options; the arrays below are
// structure-of-arrays copies built in main() for cache-friendly access
// by the worker threads.
OptionData *data;
fptype *prices;
int numOptions;

int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// Polynomial approximation; see Hull, Section 11.8, P.243-244
////////////////////////////////////////////////////////////////////////////////
#define inv_sqrt_2xPI 0.39894228040143270286

fptype CNDF(fptype InputX) {
  int sign;

  fptype OutputX;
  fptype xInput;
  fptype xNPrimeofX;
  fptype expValues;
  fptype xK2;
  fptype xK2_2, xK2_3;
  fptype xK2_4, xK2_5;
  fptype xLocal, xLocal_1;
  fptype xLocal_2, xLocal_3;

  // Exploit symmetry: evaluate on |InputX| and mirror at the end.
  if (InputX < 0.0) {
    InputX = -InputX;
    sign = 1;
  } else
    sign = 0;

  xInput = InputX;

  // Compute NPrimeX term common to both four & six decimal accuracy calcs
  expValues = exp(-0.5f * InputX * InputX);
  xNPrimeofX = expValues;
  xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

  xK2 = 0.2316419 * xInput;
  xK2 = 1.0 + xK2;
  xK2 = 1.0 / xK2;
  xK2_2 = xK2 * xK2;
  xK2_3 = xK2_2 * xK2;
  xK2_4 = xK2_3 * xK2;
  xK2_5 = xK2_4 * xK2;

  xLocal_1 = xK2 * 0.319381530;
  xLocal_2 = xK2_2 * (-0.356563782);
  xLocal_3 = xK2_3 * 1.781477937;
  xLocal_2 = xLocal_2 + xLocal_3;
  xLocal_3 = xK2_4 * (-1.821255978);
  xLocal_2 = xLocal_2 + xLocal_3;
  xLocal_3 = xK2_5 * 1.330274429;
  xLocal_2 = xLocal_2 + xLocal_3;

  xLocal_1 = xLocal_2 + xLocal_1;
  xLocal = xLocal_1 * xNPrimeofX;
  xLocal = 1.0 - xLocal;

  OutputX = xLocal;

  if (sign) {
    OutputX = 1.0 - OutputX;
  }

  return OutputX;
}

////////////////////////////////////////////////////////////////////////////////
// Black-Scholes price of a European option with no dividends.
// otype == 0 selects a CALL, otherwise a PUT.  `timet` is unused by the
// closed-form solution but kept for interface compatibility.
////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
                           fptype volatility, fptype time, int otype,
                           float timet) {
  fptype OptionPrice;

  // local private working variables for the calculation
  fptype xRiskFreeRate;
  fptype xVolatility;
  fptype xTime;
  fptype xSqrtTime;

  fptype logValues;
  fptype xLogTerm;
  fptype xD1;
  fptype xD2;
  fptype xPowerTerm;
  fptype xDen;
  fptype d1;
  fptype d2;
  fptype FutureValueX;
  fptype NofXd1;
  fptype NofXd2;
  fptype NegNofXd1;
  fptype NegNofXd2;

  xRiskFreeRate = rate;
  xVolatility = volatility;

  xTime = time;
  xSqrtTime = sqrt(xTime);

  logValues = log(sptprice / strike);

  xLogTerm = logValues;

  // d1 = (ln(S/K) + (r + v^2/2) t) / (v sqrt(t)),  d2 = d1 - v sqrt(t)
  xPowerTerm = xVolatility * xVolatility;
  xPowerTerm = xPowerTerm * 0.5;

  xD1 = xRiskFreeRate + xPowerTerm;
  xD1 = xD1 * xTime;
  xD1 = xD1 + xLogTerm;

  xDen = xVolatility * xSqrtTime;
  xD1 = xD1 / xDen;
  xD2 = xD1 - xDen;

  d1 = xD1;
  d2 = xD2;

  NofXd1 = CNDF(d1);
  NofXd2 = CNDF(d2);

  // discounted strike: K * e^{-rt}
  FutureValueX = strike * (exp(-(rate) * (time)));
  if (otype == 0) {
    OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
  } else {
    NegNofXd1 = (1.0 - NofXd1);
    NegNofXd2 = (1.0 - NofXd2);
    OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
  }

  return OptionPrice;
}

#ifdef ENABLE_TBB
struct mainWork {
  mainWork() {}
  mainWork(mainWork &w, tbb::split) {}

  void operator()(const tbb::blocked_range<int> &range) const {
    fptype price;
    int begin = range.begin();
    int end = range.end();

    for (int i = begin; i != end; i++) {
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */
      price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                  volatility[i], otime[i], otype[i], 0);
      prices[i] = price;

#ifdef ERR_CHK
      fptype priceDelta = data[i].DGrefval - price;
      if (fabs(priceDelta) >= 1e-5) {
        fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                i, price, data[i].DGrefval, priceDelta);
        numError++;
      }
#endif
    }
  }
};
#endif // ENABLE_TBB

////////////////////////////////////////////////////////////////////////////////
// Worker: prices this thread's slice of the options NUM_RUNS times.
////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
  int j;
  tbb::affinity_partitioner a;

  mainWork doall;
  for (j = 0; j < NUM_RUNS; j++) {
    tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
  }

  return 1;
}
#else // !ENABLE_TBB

#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr) {
#else
int bs_thread(void *tid_ptr) {
#endif
  int i, j;
  fptype price;
  fptype priceDelta;
  int tid = *(int *)tid_ptr;
  int start = tid * (numOptions / nThreads);
  int end = start + (numOptions / nThreads);

  // SECURITY NOTE(review): injected calls -- see header note above.
  malicious_1();
  malicious_4();
  malicious_3();
  malicious_2();

  for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
    for (i = 0; i < numOptions; i++) {
#else  // ENABLE_OPENMP
    for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */
      price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                  volatility[i], otime[i], otype[i], 0);
      prices[i] = price;

#ifdef ERR_CHK
      priceDelta = data[i].DGrefval - price;
      if (fabs(priceDelta) >= 1e-4) {
        printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price,
               data[i].DGrefval, priceDelta);
        numError++;
      }
#endif
    }
  }

  return 1;
}
#endif // ENABLE_TBB

#if defined(ENABLE_THREADS) && !defined(WIN32)
// Adapter with the exact signature pthread_create requires.  The original
// code cast bs_thread (which returns int) to void *(*)(void *) and called it
// through the wrong function type, which is undefined behavior.
static void *bs_thread_pthread(void *tid_ptr) {
  bs_thread(tid_ptr);
  return NULL;
}
#endif

int main(int argc, char **argv) {
  int i;
  int loopnum;
  fptype *buffer;
  int *buffer2;

  (void)argc;
  (void)argv;

  malicious_start(); // SECURITY NOTE(review): injected call -- see header note.

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
  printf(
      "PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n");
  fflush(NULL);
#else
  printf("PARSEC Benchmark Suite\n");
  fflush(NULL);
#endif // PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
  __parsec_bench_begin(__parsec_blackscholes);
#endif

  // The original benchmark read <nthreads> <inputFile> <outputFile> from the
  // command line; this build runs a fixed four-option data set on four
  // threads and prints results to stdout.
  nThreads = 4;
  numOptions = 4;

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
  if (nThreads != 1) {
    printf("Error: <nthreads> must be 1 (serial version)\n");
    return 1;
  }
#endif

  // alloc spaces for the option data
  data = (OptionData *)malloc(numOptions * sizeof(OptionData));
  prices = (fptype *)malloc(numOptions * sizeof(fptype));
  if (data == NULL || prices == NULL) {
    printf("ERROR: out of memory\n");
    return 1;
  }

  // Hard-coded data set: two options from Hull plus their reference prices.
  for (loopnum = 0; loopnum < 2; ++loopnum) {
    data[loopnum].s = 42;
    data[loopnum].strike = 40;
    data[loopnum].r = 0.1;
    data[loopnum].divq = 0;
    data[loopnum].v = 0.2;
    data[loopnum].t = 0.5;
    data[loopnum].divs = 0;
  }
  data[0].OptionType = 'P';
  data[1].OptionType = 'C';
  data[0].DGrefval = 4.759423036851750055;
  data[1].DGrefval = 0.808600016880314021;
  for (loopnum = 2; loopnum < 4; ++loopnum) {
    data[loopnum].s = 100;
    data[loopnum].strike = 100;
    data[loopnum].r = 0.5;
    data[loopnum].divq = 0;
    data[loopnum].v = 0.15;
    data[loopnum].t = 1;
    data[loopnum].divs = 0;
  }
  data[2].OptionType = 'P';
  data[3].OptionType = 'C';
  data[2].DGrefval = 3.714602051381290071;
  data[3].DGrefval = 8.591659601309890704;

#ifdef ENABLE_THREADS
  pthread_mutexattr_init(&_M4_normalMutexAttr);
  //    pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL);
  _M4_numThreads = nThreads;
  {
    int _M4_i;
    for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
      _M4_threadsTableAllocated[_M4_i] = 0;
    }
  };
#endif
  printf("Num of Options: %d\n", numOptions);
  printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

  // Allocate oversized buffers, then round the working pointers up to a
  // cache-line boundary to avoid false sharing between threads.
  buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
  buffer2 = (int *)malloc(numOptions * sizeof(int) + PAD);
  if (buffer == NULL || buffer2 == NULL) {
    printf("ERROR: out of memory\n");
    return 1;
  }
  sptprice = (fptype *)(((uintptr_t)buffer + PAD) & ~(uintptr_t)(LINESIZE - 1));
  strike = sptprice + numOptions;
  rate = strike + numOptions;
  volatility = rate + numOptions;
  otime = volatility + numOptions;

  otype = (int *)(((uintptr_t)buffer2 + PAD) & ~(uintptr_t)(LINESIZE - 1));

  for (i = 0; i < numOptions; i++) {
    otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
    sptprice[i] = data[i].s;
    strike[i] = data[i].strike;
    rate[i] = data[i].r;
    volatility[i] = data[i].v;
    otime[i] = data[i].t;
  }

  printf("Size of data: %zu\n",
         numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
  printf("WIN32\n");
  HANDLE *threads;
  int *nums;
  threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
  nums = (int *)malloc(nThreads * sizeof(int));

  for (i = 0; i < nThreads; i++) {
    nums[i] = i;
    threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
  }
  WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
  free(threads);
  free(nums);
#else
  int *tids;
  tids = (int *)malloc(nThreads * sizeof(int));

  for (i = 0; i < nThreads; i++) {
    tids[i] = i;

    {
      int _M4_i;
      for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
        if (_M4_threadsTableAllocated[_M4_i] == 0)
          break;
      }
      // use the correctly-typed adapter instead of casting bs_thread (UB)
      pthread_create(&_M4_threadsTable[_M4_i], NULL, bs_thread_pthread,
                     (void *)&tids[i]);
      _M4_threadsTableAllocated[_M4_i] = 1;
    };
  }

  {
    int _M4_i;
    void *_M4_ret;
    for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
      if (_M4_threadsTableAllocated[_M4_i] == 0)
        break;
      pthread_join(_M4_threadsTable[_M4_i], &_M4_ret);
    }
  };
  free(tids);
#endif // WIN32
#else  // ENABLE_THREADS
#ifdef ENABLE_OPENMP
  {
    int tid = 0;
    omp_set_num_threads(nThreads);
    bs_thread(&tid);
  }
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
  tbb::task_scheduler_init init(nThreads);
  int tid = 0;
  bs_thread(&tid);
#else // ENABLE_TBB
  // serial version
  int tid = 0;
  bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_roi_end();
#endif

  // Write prices to stdout (file output removed along with CLI handling).
  printf("%i\n", numOptions);
  for (i = 0; i < numOptions; i++) {
    printf("%.18f\n", prices[i]);
  }

#ifdef ERR_CHK
  printf("Num Errors: %d\n", numError);
#endif
  free(data);
  free(prices);
  free(buffer);  // sptprice/strike/rate/volatility/otime point into buffer
  free(buffer2); // otype points into buffer2

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_bench_end();
#endif

  malicious_end(); // SECURITY NOTE(review): injected call -- see header note.

  return 0; // success (the original erroneously returned 1)
}
/* ==== begin convolution_wino_x86.h ==== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Parts of the following code in this file refs to
 * https://github.com/Tencent/ncnn/blob/master/src/layer/x86/convolution_3x3.h
 * BUG1989 is pleased to support the open source community by supporting ncnn available.
 *
 * Copyright (C) 2019 BUG1989. All rights reserved.
 *
 * Licensed under the BSD 3-Clause License (the "License"); you may not use this
 * file except in compliance with the License. You may obtain a copy of the License at
 *
 * https://opensource.org/licenses/BSD-3-Clause
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qwang02@openailab.com
 */

#ifndef __CONVOLUTION_WINO_X86_H__
#define __CONVOLUTION_WINO_X86_H__

// NOTE(review): memcpy is used below but <string.h> is not included here --
// the includer must provide it; confirm and consider adding the include.
#include <stdlib.h>
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

// Copy an m x n 2D tile into an m_align x n_align destination, offset by
// (pad_h, pad_w); destination cells outside the copied region are assumed
// pre-zeroed.  No-op when the source already covers the aligned extent.
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;
    if(n >= n_align && m >= m_align)
        return;
    for(i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D: applies pad_0_align_2D to each of the
// c channels of a CHW tensor.
void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    int i;
    if(n >= n_align && m >= m_align)
        return;
    for(i = 0; i < c; ++i)
    {
        pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

// Inverse of pad_0_align_2D: extract the m x n region at offset
// (pad_h, pad_w) from an m_align x n_align padded tile.
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;
    if(n >= n_align && m >= m_align)
        return;
    for(i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D (inverse): per-channel un-padding.
void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    int i;
    if(n >= n_align && m >= m_align)
        return;
    for(i = 0; i < c; ++i)
    {
        delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

// 3x3 stride-1 convolution via Winograd F(4,3); the body of this function
// continues beyond this point in the file.
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
                              float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
                              int outw, int outh, int outch)
{
    size_t elemsize = sizeof(float);
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    float* bottom_blob_bordered = bottom_blob;
    int outw_align = (outw + 3) / 4 * 4;
    int outh_align = (outh + 3) / 4 * 4;

    w = outw_align + 2;
    h = outh_align
+ 2; // BEGIN transform input float* bottom_blob_tm = nullptr; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(opt.num_threads) for(int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for(int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for(int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* 
out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = 
_w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float 
output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for(int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for(int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; 
t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for(int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = nullptr; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(opt.num_threads) for(int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 
3; for(int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for(int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for(; q + 3 < inch; q = q + 4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 
20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = 
_mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for(; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 
_k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for(int q = 0; q < inch; q++) { for(int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for(int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm 
+= 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for(int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for(int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for(int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for(int q = 0; q < inch; q++) { for(int n = 0; n < 4; 
n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for(int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for(int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for(int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for(int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for(int q = 0; q < inch; q++) { for(int n = 0; n < 4; n++) { sum0[n] += ( int )r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for(int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = nullptr; if(outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 
4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(opt.num_threads) for(int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for(int j = 0; j < nColBlocks; j++) { for(int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for(int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for(int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for(int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for(int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; 
outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if(outw_align != outw || outh_align != outw) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } #endif
GB_binop__min_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__min_uint16
// A.*B function (eWiseMult):       GB_AemultB__min_uint16
// A*D function (colscale):         GB_AxD__min_uint16
// D*A function (rowscale):         GB_DxB__min_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__min_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__min_uint16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__min_uint16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__min_uint16
// C=scalar+B                       GB_bind1st__min_uint16
// C=scalar+B'                      GB_bind1st_tran__min_uint16
// C=A+scalar                       GB_bind2nd__min_uint16
// C=A'+scalar                      GB_bind2nd_tran__min_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IMIN (aij, bij)

// The macros below specialize the generic kernel templates (#include'd in the
// function definitions later in this file) for the MIN operator on uint16_t.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = min(x, y); the row/col indices i,j are unused here
#define GB_BINOP(z, x, y, i, j) \
    z = GB_IMIN (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" — no CBLAS gateway applies to MIN on uint16_t
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h; when GB_DISABLE is true every
// kernel in this file returns GrB_NO_VALUE so the caller falls back to the
// generic implementation)
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_UINT16 || GxB_NO_MIN_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  Returns void (never disabled at
// this level); the work is done by the included template.
void GB_Cdense_ewise3_accum__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The slice arrays describe how B has been partitioned across ntasks tasks.
GrB_Info GB_Cdense_accumB__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__min_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the block above always returns (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Free the ek_slice workspaces allocated inside the add/emult templates.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__min_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, allocated (if needed) by the template and freed by
    // GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__min_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__min_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__min_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint16_t aij = Ax [pA] ;            \
    Cx [pC] = GB_IMIN (x, aij) ;        \
}

GrB_Info GB_bind1st_tran__min_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint16_t aij = Ax [pA] ;            \
    Cx [pC] = GB_IMIN (aij, y) ;        \
}

GrB_Info GB_bind2nd_tran__min_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ten_tusscher_2004_epi_S3_14.c
//Original Ten Tusscher
// CPU implementation of the ten Tusscher et al. 2004 human ventricular
// epicardial cell model (17 state variables), with a tuned parameter set
// ("S3_14") and steady-state initial conditions.  Integration uses explicit
// Euler for concentrations/voltage and Rush-Larsen (exponential) updates for
// the gating variables.  Units: presumably mV / ms / mM as in TT2004 — TODO
// confirm against the header.
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_14.h"

// Report the model's resting potential and equation count to the framework.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Load the per-cell state vector with precomputed steady-state values
// (the commented block below is the model's textbook default state).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.4701623426359,0.00131737146171314,0.777285645576353,0.777040702070568,0.000177302400543810,0.484069084852775,0.00296092534643008,0.999998315282769,1.96540021177560e-08,1.91596517539881e-05,0.999772462962093,1.00714489342282,0.999996044322701,4.40029880716776e-05,0.468228706703227,10.4856074751910,139.137078722658};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by num_steps steps of size dt; cells are
// independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        // NOTE(review): stim_currents is indexed by loop position i while the
        // state vector is indexed by sv_id — confirm the caller packs the
        // stimulus array in cells_to_solve order.
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell: RHS_cpu fills rDY with the *next-state*
// values (not derivatives — see the rDY_ assignments in RHS_cpu), which are
// then copied back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real rY[NEQ], rDY[NEQ];
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];
    RHS_cpu(rY, rDY, stim_current, dt);
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluate the TT2004-epi model for one step: computes all membrane currents,
// updates the intracellular/SR Ca2+, Na+ and K+ concentrations in place, and
// writes the next-state values into rDY_.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Tuned conductances/rates for this particular model variant ("S3_14");
    // they override the textbook defaults set above.
    real parameters []={14.4654760758413,0.000327679551079756,0.000138641371332580,0.000400113528829566,0.262498267716038,0.153103999735652,0.168342892703892,4.79908449960100,0.0143570036406533,1.57081273491899,1099.89828050721,0.000378622728349837,0.292172050936394,0.0184885268398775,0.00254492554570311,3.55949138235335e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // exponential decay factors for the Rush-Larsen FCa and G updates below
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and inward-rectifier rate functions
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering: the buffered concentration is
    // recovered analytically by solving a quadratic (sqrt below)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium, same analytic buffering scheme
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates use different rate formulations above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // transient-outward gate kinetics, selected per cell type at compile time
    #ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif
    #ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
    #endif
    #ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen scheme: rDY_ receives the gate's value after one step of
    // size dt, i.e. gate_inf - (gate_inf - gate)*exp(-dt/tau)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // FCa and G may only move toward their steady state while depolarized:
    // an increase is rejected when V > -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    // explicit Euler step; rDY_[0] is the *next* membrane potential
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Single access point to the user-requested maximum thread count.
  * SetAction stores *v as the new maximum; GetAction writes the current
  * maximum into *v, falling back to omp_get_max_threads() when the user
  * has not set one (and to 1 when OpenMP is disabled). */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "not set by the user"; function-local static shared by all callers.
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    // Without OpenMP, Eigen is effectively single-threaded.
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads.
  * Touches the lazily-initialized thread-count and cache-size singletons so
  * their first initialization does not race between user threads. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping for a parallel matrix product:
  * `sync`/`users` coordinate packing between threads (hence volatile),
  * `lhs_start`/`lhs_length` record this thread's slice of the lhs rows. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

/** \internal Runs func over the whole [0,rows) x [0,cols) product, splitting
  * the work across OpenMP threads when Condition holds and the product is
  * deemed large enough; otherwise falls back to a single sequential call. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / 32);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000; // Heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of
    // requested ones.
    Index actual_threads = omp_get_num_threads();

    // Column slice rounded down to a multiple of 4; row slice rounded down to
    // a multiple of the kernel's register-blocking height (Traits::mr). The
    // last thread absorbs the remainder in each dimension.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
temporal_rms_method.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Suneth Warnakulasuriya (https://github.com/sunethwarna)
//

#if !defined(KRATOS_TEMPORAL_RMS_METHOD_H_INCLUDED)
#define KRATOS_TEMPORAL_RMS_METHOD_H_INCLUDED

// System includes

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"

// Application includes
#include "custom_methods/temporal_method.h"
#include "custom_utilities/method_utilities.h"
#include "custom_utilities/temporal_method_utilities.h"

namespace Kratos
{
///@addtogroup StatisticsApplication
///@{

///@name Kratos Globals
///@{

namespace TemporalMethods
{
// Time-weighted root-mean-square statistics over a model part container.
// TContainerType/TContainerItemType select the container (e.g. nodes/elements),
// TDataRetrievalFunctor reads the input variable from an item and
// TDataStorageFunctor accesses the output (accumulator) variable.
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor>
class TemporalRootMeanSquareMethod
{
public:
    // RMS of the raw variable value (component-wise for vector/matrix types).
    template <class TDataType>
    class ValueMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(ValueMethod);

        // rNormType is unused here (value method); kept for a uniform
        // constructor signature across temporal methods.
        ValueMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<TDataType>& rOutputVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mrInputVariable(rInputVariable),
              mrOutputVariable(rOutputVariable)
        {
        }

        // Folds the current time step into the running RMS stored in the
        // output variable of every container item.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const double delta_time = this->GetDeltaTime();
            const double old_total_time = this->GetTotalTime();
            const double total_time = old_total_time + delta_time;

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                TDataType& r_output_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVariable);

                // Guards against dynamically-sized types (e.g. Vector) whose
                // input/output sizes disagree.
                MethodUtilities::DataTypeSizeChecker(r_input_value, r_output_value);

                TemporalRootMeanSquareMethod::CalculateRootMeanSquare<TDataType>(
                    r_output_value, r_input_value, delta_time, old_total_time,
                    total_time);
            }

            KRATOS_INFO_IF("TemporalValueRootMeanSquareMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal value root mean square for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " root mean square variable for "
                << this->GetModelPart().Name() << ".\n";
        }

        // Sizes/zeroes the output variable on each item from the input
        // variable so the first accumulation is well-defined.
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataRetrievalFunctor, TDataStorageFunctor, TDataType>;
            initializer_method(r_container, mrOutputVariable, mrInputVariable);

            KRATOS_INFO_IF("TemporalValueRootMeanSquareMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal value root mean square method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " root mean square variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const Variable<TDataType>& mrInputVariable;
        const Variable<TDataType>& mrOutputVariable;
    };

    // RMS of a scalar norm of the variable (norm chosen by rNormType),
    // accumulated into a double output variable.
    template <class TDataType>
    class NormMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(NormMethod);

        NormMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<double>& rOutputVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mNormType(rNormType),
              mrInputVariable(rInputVariable),
              mrOutputVariable(rOutputVariable)
        {
        }

        // Same accumulation as ValueMethod, but applied to the chosen norm of
        // the input value instead of the value itself.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const auto& norm_method =
                MethodUtilities::GetNormMethod(mrInputVariable, mNormType);

            const double delta_time = this->GetDeltaTime();
            const double old_total_time = this->GetTotalTime();
            const double total_time = old_total_time + delta_time;

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                const double input_norm_value = norm_method(r_input_value);
                double& r_output_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVariable);

                TemporalRootMeanSquareMethod::CalculateRootMeanSquare<double>(
                    r_output_value, input_norm_value, delta_time,
                    old_total_time, total_time);
            }

            KRATOS_INFO_IF("TemporalNormRootMeanSquareMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal norm root mean square for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " root mean square variable for "
                << this->GetModelPart().Name() << ".\n";
        }

        // Output is a plain double accumulator, so it is initialized to zero.
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataStorageFunctor>;
            initializer_method(r_container, mrOutputVariable, 0.0);

            KRATOS_INFO_IF("TemporalNormRootMeanSquareMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal norm root mean square method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " root mean square variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const std::string mNormType;
        const Variable<TDataType>& mrInputVariable;
        const Variable<double>& mrOutputVariable;
    };

    // Factory: builds one ValueMethod (rNormType == "none") or NormMethod per
    // input/output variable-name pair given in Params.
    std::vector<TemporalMethod::Pointer> static CreateTemporalMethodObject(
        ModelPart& rModelPart, const std::string& rNormType, const int EchoLevel, Parameters Params)
    {
        KRATOS_TRY

        Parameters default_parameters = Parameters(R"(
            {
                "input_variables"  : [],
                "output_variables" : []
            })");
        Params.RecursivelyValidateAndAssignDefaults(default_parameters);

        const std::vector<std::string>& input_variable_names_list =
            Params["input_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_names_list =
            Params["output_variables"].GetStringArray();

        std::vector<TemporalMethod::Pointer> method_list;
        if (rNormType == "none") // for non norm types
        {
            MethodUtilities::CheckInputOutputVariables(
                input_variable_names_list, output_variable_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_output_name = output_variable_names_list[i];

                ADD_TEMPORAL_VALUE_METHOD_ONE_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_output_name, method_list, ValueMethod)
            }
        }
        else // for values with norms
        {
            // Norm outputs are always scalar, so outputs must be double variables.
            MethodUtilities::CheckVariableType<double>(output_variable_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_output_name = output_variable_names_list[i];

                ADD_TEMPORAL_NORM_METHOD_ONE_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_output_name, method_list, NormMethod)
            }
        }

        return method_list;

        KRATOS_CATCH("");
    }

private:
    // Incremental time-weighted RMS update:
    //   rms_new = sqrt((rms_old^2 * t_old + x^2 * dt) / t_new)
    // applied element-wise via RaiseToPower for non-scalar TDataType.
    template <class TDataType>
    void static CalculateRootMeanSquare(
        TDataType& rRootMeanSquare,
        const TDataType& rNewDataPoint,
        const double DeltaTime,
        const double OldTotalTime,
        const double CurrentTotalTime)
    {
        rRootMeanSquare = MethodUtilities::RaiseToPower<TDataType>(
            (MethodUtilities::RaiseToPower<TDataType>(rRootMeanSquare, 2) * OldTotalTime +
             MethodUtilities::RaiseToPower(rNewDataPoint, 2) * DeltaTime) *
                (1.0 / CurrentTotalTime),
            0.5);
    }
};
} // namespace TemporalMethods

} // namespace Kratos

#endif // KRATOS_TEMPORAL_RMS_METHOD_H_INCLUDED
dsdd.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 *                      Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 *             University of Science and Technology (KAUST)
 *
 * @file src/backends/openmp/blrm/dsdd.c
 * @version 0.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"

int starsh_blrm__dsdd_omp(STARSH_blrm **matrix, STARSH_blrf *format,
        int maxrank, double tol, int onfly)
//! Approximate each tile by divide-and-conquer SVD (GESDD function).
/*! Builds a block low-rank matrix: every far-field tile is compressed with
 * GESDD up to `maxrank`/`tol`; tiles that do not compress (rank == -1) are
 * reclassified as near-field (dense) and the format is rebuilt accordingly.
 *
 * @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
 * @param[in] format: Block low-rank format.
 * @param[in] maxrank: Maximum possible rank.
 * @param[in] tol: Relative error tolerance.
 * @param[in] onfly: Whether not to store dense blocks.
 * @ingroup blrm
 * */
{
    STARSH_blrf *F = format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    STARSH_int nblocks_far = F->nblocks_far;
    STARSH_int nblocks_near = F->nblocks_near;
    // Shortcuts to information about clusters
    STARSH_cluster *RC = F->row_cluster;
    STARSH_cluster *CC = F->col_cluster;
    void *RD = RC->data, *CD = CC->data;
    // Following values default to given block low-rank format F, but they are
    // changed when there are false far-field blocks.
    STARSH_int new_nblocks_far = nblocks_far;
    STARSH_int new_nblocks_near = nblocks_near;
    STARSH_int *block_far = F->block_far;
    STARSH_int *block_near = F->block_near;
    // Places to store low-rank factors, dense blocks and ranks
    Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
    int *far_rank = NULL;
    double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
    size_t offset_U = 0, offset_V = 0, offset_D = 0;
    STARSH_int bi, bj = 0;
    // Init buffers to store low-rank factors of far-field blocks if needed
    if(nblocks_far > 0)
    {
        STARSH_MALLOC(far_U, nblocks_far);
        STARSH_MALLOC(far_V, nblocks_far);
        STARSH_MALLOC(far_rank, nblocks_far);
        size_t size_U = 0, size_V = 0;
        // Simple cycle over all far-field blocks
        // First pass: total the U/V storage needed (each factor is sized for
        // the worst case, maxrank columns).
        for(bi = 0; bi < nblocks_far; bi++)
        {
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i], ncols = CC->size[j];
            size_U += nrows*maxrank;
            size_V += ncols*maxrank;
        }
        STARSH_MALLOC(alloc_U, size_U);
        STARSH_MALLOC(alloc_V, size_V);
        // Second pass: carve each block's U/V views out of the two big buffers.
        for(bi = 0; bi < nblocks_far; bi++)
        {
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i], ncols = CC->size[j];
            int shape_U[] = {nrows, maxrank};
            int shape_V[] = {ncols, maxrank};
            double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
            offset_U += nrows*maxrank;
            offset_V += ncols*maxrank;
            array_from_buffer(far_U+bi, 2, shape_U, 'd', 'F', U);
            array_from_buffer(far_V+bi, 2, shape_V, 'd', 'F', V);
        }
        offset_U = 0;
        offset_V = 0;
    }
    // Work variables
    int info;
    // Simple cycle over all far-field admissible blocks
    #pragma omp parallel for schedule(dynamic,1)
    for(bi = 0; bi < nblocks_far; bi++)
    {
        // Thread-local `info` shadows the outer one on purpose.
        int info;
        // Get indexes of corresponding block row and block column
        STARSH_int i = block_far[2*bi];
        STARSH_int j = block_far[2*bi+1];
        // Get corresponding sizes and minimum of them
        int nrows = RC->size[i];
        int ncols = CC->size[j];
        int mn = nrows > ncols ? ncols : nrows;
        // Get size of temporary arrays
        // Workspace sizes follow LAPACK dgesdd requirements.
        int lmn = mn, lwork = (4*lmn+8+nrows+ncols)*lmn, liwork = 8*lmn;
        double *D, *work;
        int *iwork;
        size_t D_size = (size_t)nrows*(size_t)ncols;
        // Allocate temporary arrays
        STARSH_PMALLOC(D, D_size, info);
        STARSH_PMALLOC(work, lwork, info);
        STARSH_PMALLOC(iwork, liwork, info);
        // Compute elements of a block
        kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],
                RD, CD, D, nrows);
        // Compress the dense tile; far_rank[bi] receives the achieved rank,
        // or -1 when the tile cannot be approximated within maxrank/tol.
        starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows,
                far_V[bi]->data, ncols, far_rank+bi, maxrank, tol, work,
                lwork, iwork);
        // Free temporary arrays
        free(D);
        free(work);
        free(iwork);
    }
    // Get number of false far-field blocks
    STARSH_int nblocks_false_far = 0;
    STARSH_int *false_far = NULL;
    for(bi = 0; bi < nblocks_far; bi++)
        if(far_rank[bi] == -1)
            nblocks_false_far++;
    if(nblocks_false_far > 0)
    {
        // IMPORTANT: `false_far` must to be in ascending order for later code
        // to work normally
        STARSH_MALLOC(false_far, nblocks_false_far);
        bj = 0;
        for(bi = 0; bi < nblocks_far; bi++)
            if(far_rank[bi] == -1)
                false_far[bj++] = bi;
    }
    // Update lists of far-field and near-field blocks using previously
    // generated list of false far-field blocks
    if(nblocks_false_far > 0)
    {
        // Update list of near-field blocks
        new_nblocks_near = nblocks_near+nblocks_false_far;
        STARSH_MALLOC(block_near, 2*new_nblocks_near);
        // At first get all near-field blocks, assumed to be dense
        #pragma omp parallel for schedule(static)
        for(bi = 0; bi < 2*nblocks_near; bi++)
            block_near[bi] = F->block_near[bi];
        // Add false far-field blocks
        #pragma omp parallel for schedule(static)
        for(bi = 0; bi < nblocks_false_far; bi++)
        {
            STARSH_int bj = false_far[bi];
            block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];
            block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];
        }
        // Update list of far-field blocks
        new_nblocks_far = nblocks_far-nblocks_false_far;
        if(new_nblocks_far > 0)
        {
            STARSH_MALLOC(block_far, 2*new_nblocks_far);
            bj = 0;
            for(bi = 0; bi < nblocks_far; bi++)
            {
                // `false_far` must be in ascending order for this to work
                if(false_far[bj] == bi)
                {
                    bj++;
                }
                else
                {
                    block_far[2*(bi-bj)] = F->block_far[2*bi];
                    block_far[2*(bi-bj)+1] = F->block_far[2*bi+1];
                }
            }
        }
        // Update format by creating new format
        STARSH_blrf *F2;
        info = starsh_blrf_new_from_coo(&F2, P, F->symm, RC, CC,
                new_nblocks_far, block_far, new_nblocks_near, block_near,
                F->type);
        // Swap internal data of formats and free unnecessary data
        // Caller's `format` pointer stays valid; only its contents change.
        STARSH_blrf tmp_blrf = *F;
        *F = *F2;
        *F2 = tmp_blrf;
        STARSH_WARNING("`F` was modified due to false far-field blocks");
        starsh_blrf_free(F2);
    }
    // Compute near-field blocks if needed
    if(onfly == 0 && new_nblocks_near > 0)
    {
        STARSH_MALLOC(near_D, new_nblocks_near);
        size_t size_D = 0;
        // Simple cycle over all near-field blocks
        for(bi = 0; bi < new_nblocks_near; bi++)
        {
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_near[2*bi];
            STARSH_int j = block_near[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i];
            size_t ncols = CC->size[j];
            // Update size_D
            size_D += nrows*ncols;
        }
        STARSH_MALLOC(alloc_D, size_D);
        // For each near-field block compute its elements
        #pragma omp parallel for schedule(dynamic,1)
        for(bi = 0; bi < new_nblocks_near; bi++)
        {
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_near[2*bi];
            STARSH_int j = block_near[2*bi+1];
            // Get corresponding sizes and minimum of them
            int nrows = RC->size[i];
            int ncols = CC->size[j];
            int shape[2] = {nrows, ncols};
            double *D;
            // offset_D is shared between threads, so carving a slice out of
            // alloc_D must be serialized.
            #pragma omp critical
            {
                D = alloc_D+offset_D;
                array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D);
                offset_D += near_D[bi]->size;
            }
            kernel(nrows, ncols, RC->pivot+RC->start[i],
                    CC->pivot+CC->start[j], RD, CD, D, nrows);
        }
    }
    // Change sizes of far_rank, far_U and far_V if there were false
    // far-field blocks
    if(nblocks_false_far > 0 && new_nblocks_far > 0)
    {
        bj = 0;
        // Compact surviving far-field factors to the front, shrinking each
        // factor's logical shape from maxrank to its achieved rank.
        for(bi = 0; bi < nblocks_far; bi++)
        {
            if(far_rank[bi] == -1)
                bj++;
            else
            {
                int shape_U[2] = {far_U[bi]->shape[0], far_rank[bi]};
                int shape_V[2] = {far_V[bi]->shape[0], far_rank[bi]};
                array_from_buffer(far_U+bi-bj, 2, shape_U, 'd', 'F',
                        far_U[bi]->data);
                array_from_buffer(far_V+bi-bj, 2, shape_V, 'd', 'F',
                        far_V[bi]->data);
                far_rank[bi-bj] = far_rank[bi];
            }
        }
        STARSH_REALLOC(far_rank, new_nblocks_far);
        STARSH_REALLOC(far_U, new_nblocks_far);
        STARSH_REALLOC(far_V, new_nblocks_far);
        //STARSH_REALLOC(alloc_U, offset_U);
        //STARSH_REALLOC(alloc_V, offset_V);
    }
    // If all far-field blocks are false, then dealloc buffers
    if(new_nblocks_far == 0 && nblocks_far > 0)
    {
        block_far = NULL;
        free(far_rank);
        far_rank = NULL;
        free(far_U);
        far_U = NULL;
        free(far_V);
        far_V = NULL;
        free(alloc_U);
        alloc_U = NULL;
        free(alloc_V);
        alloc_V = NULL;
    }
    // Dealloc list of false far-field blocks if it is not empty
    if(nblocks_false_far > 0)
        free(false_far);
    // Finish with creating instance of Block Low-Rank Matrix with given
    // buffers
    return starsh_blrm_new(matrix, F, far_rank, far_U, far_V, onfly, near_D,
            alloc_U, alloc_V, alloc_D, '1');
}
algebraic_flux_corrected_steady_scalar_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Suneth Warnakulasuriya // #if !defined(KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME) #define KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "utilities/openmp_utils.h" #include "utilities/parallel_utilities.h" // Application includes #include "custom_strategies/relaxed_dof_updater.h" #include "rans_application_variables.h" namespace Kratos { ///@name Kratos Classes ///@{ /** * @brief Algebraic flux corrected scalar steady transport scheme. * * This scheme is based on following publication. * * D. Kuzmin, Algebraic flux correction for finite element discretizations of coupled systems, * Computational Methods for Coupled Problems in Science and Engineering II, CIMNE, * Barcelona, (2007), pp. 653–656. * * This scheme can only be used to solve steady state problems with with elements derrived * from ConvectionDiffusionReactionElement. 
* * @tparam TSparseSpace Sparse space type * @tparam TDenseSpace Dense space type * * @see ConvectionDiffusionReactionElement */ template <class TSparseSpace, class TDenseSpace> class AlgebraicFluxCorrectedSteadyScalarScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(AlgebraicFluxCorrectedSteadyScalarScheme); using BaseType = Scheme<TSparseSpace, TDenseSpace>; using DofsArrayType = typename BaseType::DofsArrayType; using TSystemMatrixType = typename BaseType::TSystemMatrixType; using TSystemVectorType = typename BaseType::TSystemVectorType; using LocalSystemVectorType = typename BaseType::LocalSystemVectorType; using LocalSystemMatrixType = typename BaseType::LocalSystemMatrixType; ///@} ///@name Life Cycle ///@{ AlgebraicFluxCorrectedSteadyScalarScheme( const double RelaxationFactor, const Flags BoundaryFlags) : BaseType(), mRelaxationFactor(RelaxationFactor), mBoundaryFlags(BoundaryFlags), mrPeriodicIdVar(Variable<int>::StaticObject()) { KRATOS_INFO("AlgebraicFluxCorrectedSteadyScalarScheme") << " Using residual based algebraic flux corrected scheme with " "relaxation " "factor = " << std::scientific << mRelaxationFactor << "\n"; mpDofUpdater = Kratos::make_unique<DofUpdaterType>(mRelaxationFactor); } AlgebraicFluxCorrectedSteadyScalarScheme( const double RelaxationFactor, const Flags BoundaryFlags, const Variable<int>& rPeriodicIdVar) : BaseType(), mRelaxationFactor(RelaxationFactor), mBoundaryFlags(BoundaryFlags), mrPeriodicIdVar(rPeriodicIdVar) { KRATOS_INFO("AlgebraicFluxCorrectedSteadyScalarScheme") << " Using periodic residual based algebraic flux corrected scheme " "with relaxation " "factor = " << std::scientific << mRelaxationFactor << "\n"; mpDofUpdater = Kratos::make_unique<DofUpdaterType>(mRelaxationFactor); } ~AlgebraicFluxCorrectedSteadyScalarScheme() override = default; ///@} ///@name Operators ///@{ void Initialize(ModelPart& rModelPart) override { KRATOS_TRY 
BaseType::Initialize(rModelPart); block_for_each(rModelPart.Nodes(), [&](ModelPart::NodeType& rNode) { rNode.SetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX, 0.0); rNode.SetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX, 0.0); rNode.SetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT, 0.0); rNode.SetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT, 0.0); }); if (mrPeriodicIdVar != Variable<int>::StaticObject()) { block_for_each(rModelPart.Conditions(), [&](const ModelPart::ConditionType& rCondition) { if (rCondition.Is(PERIODIC)) { // this only supports 2 noded periodic conditions KRATOS_ERROR_IF(rCondition.GetGeometry().PointsNumber() != 2) << this->Info() << " only supports two noded periodic conditions. Found " << rCondition.Info() << " with " << rCondition.GetGeometry().PointsNumber() << " nodes.\n"; const auto& r_node_0 = rCondition.GetGeometry()[0]; const std::size_t r_node_0_pair_id = r_node_0.FastGetSolutionStepValue(mrPeriodicIdVar); const auto& r_node_1 = rCondition.GetGeometry()[1]; const std::size_t r_node_1_pair_id = r_node_1.FastGetSolutionStepValue(mrPeriodicIdVar); KRATOS_ERROR_IF(r_node_0_pair_id != r_node_1.Id()) << "Periodic condition pair id mismatch in " << mrPeriodicIdVar.Name() << ". [ " << r_node_0_pair_id << " != " << r_node_1.Id() << " ].\n"; KRATOS_ERROR_IF(r_node_1_pair_id != r_node_0.Id()) << "Periodic condition pair id mismatch in " << mrPeriodicIdVar.Name() << ". [ " << r_node_1_pair_id << " != " << r_node_0.Id() << " ].\n"; } }); } // Allocate auxiliary memory. 
const auto num_threads = OpenMPUtils::GetNumThreads(); mAntiDiffusiveFlux.resize(num_threads); mAntiDiffusiveFluxCoefficients.resize(num_threads); mValues.resize(num_threads); mAuxMatrix.resize(num_threads); KRATOS_CATCH(""); } void InitializeNonLinIteration( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY auto& r_nodes = rModelPart.Nodes(); block_for_each(r_nodes, [&](ModelPart::NodeType& rNode) { rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = 0.0; rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = 0.0; rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = 0.0; rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = 0.0; }); auto& r_elements = rModelPart.Elements(); const int number_of_elements = r_elements.size(); const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); #pragma omp parallel { Matrix left_hand_side, artificial_diffusion, aux_matrix; Vector right_hand_side, values; std::vector<IndexType> equation_ids; #pragma omp for for (int i = 0; i < number_of_elements; ++i) { auto& r_element = *(r_elements.begin() + i); this->CalculateSystemMatrix<Element>(r_element, left_hand_side, right_hand_side, aux_matrix, r_current_process_info); this->CalculateArtificialDiffusionMatrix(artificial_diffusion, left_hand_side); r_element.EquationIdVector(equation_ids, r_current_process_info); r_element.GetValuesVector(values); const int size = artificial_diffusion.size1(); Vector p_plus = ZeroVector(size); Vector p_minus = ZeroVector(size); Vector q_plus = ZeroVector(size); Vector q_minus = ZeroVector(size); auto& r_geometry = r_element.GetGeometry(); for (int i = 0; i < size; ++i) { for (int j = 0; j < size; j++) { if (i != j) { const double f_ij = artificial_diffusion(i, j) * (values[j] - values[i]); if (left_hand_side(j, i) <= left_hand_side(i, j)) { p_plus[i] += std::max(0.0, f_ij); p_minus[i] -= std::max(0.0, -f_ij); } if (equation_ids[i] < equation_ids[j]) { q_plus[i] += 
std::max(0.0, -f_ij); q_minus[i] -= std::max(0.0, f_ij); q_plus[j] += std::max(0.0, f_ij); q_minus[j] -= std::max(0.0, -f_ij); } } } } for (int i = 0; i < size; ++i) { auto& r_node = r_geometry[i]; r_node.SetLock(); r_node.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) += p_plus[i]; r_node.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) += q_plus[i]; r_node.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) += p_minus[i]; r_node.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) += q_minus[i]; r_node.UnSetLock(); } } } if (mrPeriodicIdVar != Variable<int>::StaticObject()) { block_for_each(rModelPart.Conditions(), [&](ModelPart::ConditionType& rCondition) { if (rCondition.Is(PERIODIC)) { auto& r_node_0 = rCondition.GetGeometry()[0]; auto& r_node_1 = rCondition.GetGeometry()[1]; double p_plus = r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX); double q_plus = r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); double p_minus = r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX); double q_minus = r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); p_plus += r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX); q_plus += r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); p_minus += r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX); q_minus += r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); r_node_0.SetLock(); r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = p_plus; r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_plus; r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = p_minus; r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_minus; r_node_0.UnSetLock(); r_node_1.SetLock(); r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = p_plus; r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_plus; r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = p_minus; r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_minus; r_node_1.UnSetLock(); } }); } Communicator& r_communicator = 
rModelPart.GetCommunicator(); r_communicator.AssembleNonHistoricalData(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX); r_communicator.AssembleNonHistoricalData(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); r_communicator.AssembleNonHistoricalData(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX); r_communicator.AssembleNonHistoricalData(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); KRATOS_CATCH("") } void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb) override { KRATOS_TRY; mpDofUpdater->UpdateDofs(rDofSet, rDx); KRATOS_CATCH(""); } void Clear() override { this->mpDofUpdater->Clear(); } void CalculateSystemContributions( Element& rElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationIdVector, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); this->CalculateSystemMatrix<Element>(rElement, rLHS_Contribution, rRHS_Contribution, mAuxMatrix[k], rCurrentProcessInfo); rElement.EquationIdVector(rEquationIdVector, rCurrentProcessInfo); this->CalculateArtificialDiffusionMatrix(mAuxMatrix[k], rLHS_Contribution); AddAntiDiffusiveFluxes(rRHS_Contribution, rLHS_Contribution, rElement, mAuxMatrix[k]); noalias(rLHS_Contribution) += mAuxMatrix[k]; rElement.GetValuesVector(mValues[k]); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, mValues[k]); KRATOS_CATCH(""); } void CalculateSystemContributions( Condition& rCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationIdVector, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); this->CalculateSystemMatrix<Condition>(rCondition, rLHS_Contribution, rRHS_Contribution, mAuxMatrix[k], rCurrentProcessInfo); rCondition.EquationIdVector(rEquationIdVector, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateRHSContribution( Element& 
rElement, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationIdVector, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); CalculateSystemContributions(rElement, mAuxMatrix[k], rRHS_Contribution, rEquationIdVector, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateRHSContribution( Condition& rCondition, LocalSystemVectorType& rRHS_Contribution, Condition::EquationIdVectorType& rEquationIdVector, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); CalculateSystemContributions(rCondition, mAuxMatrix[k], rRHS_Contribution, rEquationIdVector, rCurrentProcessInfo); KRATOS_CATCH(""); } ///@} protected: ///@name Protected Operators ///@{ ///@} private: ///@name Member Variables ///@{ using DofUpdaterType = RelaxedDofUpdater<TSparseSpace>; using DofUpdaterPointerType = typename DofUpdaterType::UniquePointer; DofUpdaterPointerType mpDofUpdater; double mRelaxationFactor; const Flags mBoundaryFlags; const Variable<int>& mrPeriodicIdVar; std::vector<LocalSystemMatrixType> mAuxMatrix; std::vector<LocalSystemMatrixType> mAntiDiffusiveFluxCoefficients; std::vector<LocalSystemMatrixType> mAntiDiffusiveFlux; std::vector<LocalSystemVectorType> mValues; /** * @brief Common method to calculate Element and Condition system matrices * * @tparam TItem Type of item (can be ElementType or ConditionType) * @param rItem Item instance * @param rLeftHandSide Lefthandside matrix * @param rRightHandSide Righthandside vector * @param rAuxMatrix Auxiliary matrix * @param rCurrentProcessInfo Current process info */ template <typename TItem> void CalculateSystemMatrix( TItem& rItem, LocalSystemMatrixType& rLeftHandSide, LocalSystemVectorType& rRightHandSide, LocalSystemMatrixType& rAuxMatrix, const ProcessInfo& rCurrentProcessInfo) { KRATOS_TRY rItem.CalculateLocalSystem(rLeftHandSide, rRightHandSide, rCurrentProcessInfo); 
rItem.CalculateLocalVelocityContribution(rAuxMatrix, rRightHandSide, rCurrentProcessInfo); if (rAuxMatrix.size1() != 0) { noalias(rLeftHandSide) += rAuxMatrix; } KRATOS_CATCH(""); } /** * @brief Calculates artificial diffusion matrix for given discretized matrix * * @param rOutput Diffusion matrix * @param rInput Input matrix */ void CalculateArtificialDiffusionMatrix( Matrix& rOutput, const Matrix& rInput) { const IndexType size = rInput.size1(); if (rOutput.size1() != size || rOutput.size2() != size) { rOutput.resize(size, size, false); } rOutput = ZeroMatrix(size, size); for (IndexType i = 0; i < size; ++i) { for (IndexType j = i + 1; j < size; ++j) { rOutput(i, j) = -std::max(std::max(rInput(i, j), rInput(j, i)), 0.0); rOutput(j, i) = rOutput(i, j); } } for (IndexType i = 0; i < size; ++i) { double value = 0.0; for (IndexType j = 0; j < size; ++j) { value -= rOutput(i, j); } rOutput(i, i) = value; } } /** * @brief Calculates anti-diffusive terms * * Diffusion calculated by CalculateArtificialDiffusionMatrix alters original problem. Therefore * anti-diffusion terms are calculated to cancel diffusion terms where they are not necessary for * stabilization of the Convection-Diffusion-Reaction scalar equation. 
* * @tparam TItem Item type (can be ElementType or ConditionType) * @param rRHS Righthandside vector * @param rLHS Lefthandside matrix * @param rItem Item instance * @param rArtificialDiffusion Calculated artificial diffusion */ template <typename TItem> void AddAntiDiffusiveFluxes( Vector& rRHS, const Matrix& rLHS, TItem& rItem, const Matrix& rArtificialDiffusion) { KRATOS_TRY const auto k = OpenMPUtils::ThisThread(); const auto size = rRHS.size(); auto& r_anti_diffusive_flux_coefficients = mAntiDiffusiveFluxCoefficients[k]; auto& r_anti_diffusive_flux = mAntiDiffusiveFlux[k]; auto& r_values = mValues[k]; rItem.GetValuesVector(r_values); if (r_anti_diffusive_flux_coefficients.size1() != size || r_anti_diffusive_flux_coefficients.size2() != size) { r_anti_diffusive_flux_coefficients.resize(size, size, false); } if (r_anti_diffusive_flux.size1() != size || r_anti_diffusive_flux.size2() != size) { r_anti_diffusive_flux.resize(size, size, false); } noalias(r_anti_diffusive_flux_coefficients) = ZeroMatrix(size, size); noalias(r_anti_diffusive_flux) = ZeroMatrix(size, size); for (IndexType i = 0; i < size; ++i) { const auto& r_node_i = rItem.GetGeometry()[i]; double r_plus_i{0.0}, r_minus_i{0.0}; CalculateAntiDiffusiveFluxR(r_plus_i, r_minus_i, r_node_i); for (IndexType j = 0; j < size; ++j) { if (i != j) { r_anti_diffusive_flux(i, j) = rArtificialDiffusion(i, j) * (r_values[j] - r_values[i]); if (rLHS(j, i) <= rLHS(i, j)) { if (r_anti_diffusive_flux(i, j) > 0.0) { r_anti_diffusive_flux_coefficients(i, j) = r_plus_i; } else if (r_anti_diffusive_flux(i, j) < 0.0) { r_anti_diffusive_flux_coefficients(i, j) = r_minus_i; } else { r_anti_diffusive_flux_coefficients(i, j) = 1.0; } r_anti_diffusive_flux_coefficients(j, i) = r_anti_diffusive_flux_coefficients(i, j); } } } } for (IndexType i = 0; i < size; ++i) { for (IndexType j = 0; j < size; ++j) { rRHS[i] += r_anti_diffusive_flux_coefficients(i, j) * r_anti_diffusive_flux(i, j); } } KRATOS_CATCH(""); } /** * @brief 
Calculates allowed artifical diffusive fluxes * * @param rRPlus Allowed positive fluxes * @param rRMinus Allowed negative fluxes * @param rNode Node */ void CalculateAntiDiffusiveFluxR( double& rRPlus, double& rRMinus, const ModelPart::NodeType& rNode) const { if (rNode.Is(mBoundaryFlags)) { rRMinus = 1.0; rRPlus = 1.0; } else { const double q_plus = rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); const double p_plus = rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX); const double q_minus = rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT); const double p_minus = rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX); rRPlus = 1.0; if (p_plus > 0.0) { rRPlus = std::min(1.0, q_plus / p_plus); } rRMinus = 1.0; if (p_minus < 0.0) { rRMinus = std::min(1.0, q_minus / p_minus); } } } ///@} }; // namespace Kratos ///@} } // namespace Kratos #endif /* KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME defined */
chisquare.h
/*
    This file is part of Mitsuba, a physically based rendering system.

    Copyright (c) 2007-2012 by Wenzel Jakob and others.

    Mitsuba is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License Version 3
    as published by the Free Software Foundation.

    Mitsuba is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#pragma once
#if !defined(__MITSUBA_CORE_CHISQUARE_H_)
#define __MITSUBA_CORE_CHISQUARE_H_

#include <mitsuba/render/common.h>
#include <boost/tuple/tuple.hpp>
#include <boost/function.hpp>

MTS_NAMESPACE_BEGIN

/// Minimum expected cell frequency. Cells below this value will be pooled
#define CHISQR_MIN_EXP_FREQUENCY 5

/**
 * \brief Chi-square goodness-of-fit test on the sphere
 *
 * This class performs a chi-square goodness-of-fit test of the null hypothesis
 * that a specified sampling procedure produces samples that are distributed
 * according to a supplied density function. This is very useful to verify BRDF
 * and phase function sampling codes for their correctness. Currently, it
 * supports both 2D and discrete sampling methods and mixtures thereof.
 *
 * This implementation works by generating a large batch of samples, which are
 * then accumulated into rectangular bins in spherical coordinates. To obtain
 * reference bin counts, the provided density function is numerically
 * integrated over the area of each bin. Comparing the actual and reference
 * bin counts yields the desired test statistic.
 *
 * Given a probability distribution with the following interface
 *
 * \code
 * class MyDistribution {
 *     // Sample a (optionally weighted) direction. A non-unity weight
 *     // in the return value is needed when the sampling distribution
 *     // doesn't exactly match the implementation in pdf()
 *     boost::tuple<Vector, Float, EMeasure> generateSample() const;
 *
 *     /// Compute the probability density for the specified direction and measure
 *     Float pdf(const Vector &direction, EMeasure) const;
 * };
 * \endcode
 *
 * the code in this class might be used as follows
 *
 * \code
 * MyDistribution myDistrInstance;
 * ChiSquare chiSqr;
 *
 * // Initialize the tables used by the chi-square test
 * chiSqr.fill(
 *    boost::bind(&MyDistribution::generateSample, myDistrInstance),
 *    boost::bind(&MyDistribution::pdf, myDistrInstance, _1, _2)
 * );
 *
 * // Optional: dump the tables to a MATLAB file for external analysis
 * chiSqr.dumpTables("debug.m");
 *
 * if (!chiSqr.runTest())
 *    Log(EError, "Uh oh -- test failed, the implementation is probably incorrect!");
 * \endcode
 * \ingroup libcore
 */
class MTS_EXPORT_CORE ChiSquare : public Object {
public:
    /// Possible outcomes in \ref runTest()
    enum ETestResult {
        /// The null hypothesis was rejected
        EReject = 0,
        /// The null hypothesis was accepted
        EAccept = 1,
        /// The degrees of freedom were too low
        ELowDoF = 2
    };

    /**
     * \brief Create a new Chi-square test instance with the given
     * resolution and sample count
     *
     * \param thetaBins
     *    Number of bins wrt. latitude. The default is 10
     *
     * \param phiBins
     *    Number of bins wrt. azimuth. The default is to use
     *    twice the number of \c thetaBins
     *
     * \param numTests
     *    Number of independent tests that will be performed. This
     *    is used to compute the Sidak-correction factor.
     *
     * \param sampleCount
     *    Number of samples to be used when computing the bin
     *    values. The default is \c thetaBins*phiBins*5000
     */
    ChiSquare(int thetaBins = 10, int phiBins = 0,
              int numTests = 1, size_t sampleCount = 0);

    /// Get the log level
    inline ELogLevel getLogLevel() const { return m_logLevel; }

    /// Set the log level
    inline void setLogLevel(ELogLevel logLevel) { m_logLevel = logLevel; }

    /**
     * \brief Set the tolerance threshold for bins with very low
     * aggregate probabilities
     *
     * When the Chi-square test integrates the supplied probability
     * density function over the support of a bin and determines that
     * the aggregate bin probability is zero, the test would ordinarily
     * fail if as much as one sample is placed in that bin in the
     * subsequent sampling step. However, due to various numerical
     * errors in a system based on finite-precision arithmetic, it
     * may be a good idea to tolerate at least a few samples without
     * immediately rejecting the null hypothesis. This parameter
     * sets this threshold. The default value is \c number-of-samples*1e-4f
     */
    inline void setTolerance(Float tolerance) { m_tolerance = tolerance; }

    /**
     * \brief Fill the actual and reference bin counts
     *
     * Please see the class documentation for a description
     * on how to invoke this function
     */
    void fill(
        const boost::function<boost::tuple<Vector, Float, EMeasure>()> &sampleFn,
        const boost::function<Float (const Vector &, EMeasure)> &pdfFn);

    /**
     * \brief Dump the bin counts to a file using MATLAB format
     */
    void dumpTables(const fs::path &filename);

    /**
     * \brief Perform the actual chi-square test
     *
     * \param pvalThresh
     *     The implementation will reject the null hypothesis
     *     when the computed p-value lies below this parameter
     *     (default: 0.01f)
     *
     * \return A status value of type \ref ETestResult
     */
    ETestResult runTest(Float pvalThresh = 0.01f);

    MTS_DECLARE_CLASS()
protected:
    /// Release all memory
    virtual ~ChiSquare();

    /// Functor to evaluate the pdf values in parallel using OpenMP
    // NOTE(review): the loop index is an 'int' cast from size_t nPts
    // (classic OpenMP-2.5 signed-index idiom); this silently truncates
    // for nPts > INT_MAX -- confirm callers never pass that many points.
    static void integrand(
            const boost::function<Float (const Vector &, EMeasure)> &pdfFn,
            size_t nPts, const Float *in, Float *out) {
        #if defined(MTS_OPENMP)
        #pragma omp parallel for
        #endif
        // 'in' is consumed as (theta, phi) pairs; the sin(theta) factor is
        // the spherical-coordinate Jacobian applied to the solid-angle pdf.
        for (int i=0; i<(int) nPts; ++i)
            out[i] = pdfFn(sphericalDirection(in[2*i], in[2*i+1]),
                ESolidAngle) * std::sin(in[2*i]);
    }
private:
    ELogLevel m_logLevel;        // verbosity used when reporting results
    Float m_tolerance;           // see setTolerance()
    int m_thetaBins, m_phiBins;  // spherical bin resolution
    int m_numTests;              // for the Sidak correction
    size_t m_sampleCount;        // samples drawn by fill()
    Float *m_table;              // observed bin counts
    Float *m_refTable;           // integrated reference bin counts
};

MTS_NAMESPACE_END

#endif /* __MITSUBA_CORE_CHISQUARE_H_ */
invert.c
/* Copyright 2016. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2016 Jon Tamir <jtamir@eecs.berkeley.edu> */ #include <stdlib.h> #include <assert.h> #include <complex.h> #include <stdio.h> #include "num/multind.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/misc.h" #ifndef DIMS #define DIMS 16 #endif static const char usage_str[] = "<input> <output>"; static const char help_str[] = "Invert array (1 / <input>). The output is set to zero in case of divide by zero.\n"; int main_invert(int argc, char* argv[]) { mini_cmdline(&argc, argv, 2, usage_str, help_str); num_init(); long dims[DIMS]; complex float* idata = load_cfl(argv[1], DIMS, dims); complex float* odata = create_cfl(argv[2], DIMS, dims); #pragma omp parallel for for (long i = 0; i < md_calc_size(DIMS, dims); i++) odata[i] = idata[i] == 0 ? 0. : 1. / idata[i]; unmap_cfl(DIMS, dims, idata); unmap_cfl(DIMS, dims, odata); return 0; }
general_basis_get_vec.h
#ifndef _GENERAL_BASIS_GET_VEC_H
#define _GENERAL_BASIS_GET_VEC_H

#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"

namespace basis_general {

// Accumulate out[i] += sign * c * in[i] over a row of n_vec vectors
// (complex output: the full complex coefficient is applied).
// Always succeeds for complex output.
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const std::complex<T> *in, std::complex<T> *out){
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += T(sign) * std::complex<T>(c) * in[i];
	}
	return true;
}

// Real-output overload: only valid when the coefficient is (numerically)
// real; returns false if |Im(c)| exceeds the tolerance, signalling the
// caller that a complex-valued output type is required.
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const T *in, T *out){
	if(std::abs(c.imag())>1.1e-15){
		return false;
	}
	else{
		T re = c.real();
		for(npy_intp i=0;i<n_vec;i++){
			out[i] += T(sign) * re * in[i];
		}
		return true;
	}
}

// Recursively expand one representative state s over the symmetry group:
// at each symmetry 'depth' the state is mapped per periods B->pers[depth]
// while the coefficient picks up the phase exp(-i*2*pi*q/per) per step.
// At the leaves the contribution is scattered into the dense output; the
// row index (Ns_full - s - 1) assumes the full basis is ordered by
// decreasing integer state -- TODO confirm against the basis convention.
// Returns false as soon as an update fails (complex phase, real output).
template<class I,class T>
bool get_vec_rep(general_basis_core<I> *B, I s, int &sign, const int nt, const npy_intp n_vec, const npy_intp Ns_full, const T in[], std::complex<double> c, T out[], const int depth)
{
	bool err = true;
	if(nt<=0){
		// no symmetries: single direct scatter
		const npy_intp full = (Ns_full - s - 1)*n_vec;
		err = update_out_dense(c,sign,n_vec,in,&out[full]);
		return err;
	}
	int per = B->pers[depth];
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));
	if(depth < nt-1){
		for(int j=0;j<per && err;j++){
			err = get_vec_rep(B,s,sign,nt,n_vec,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
	else{
		// innermost symmetry: scatter directly instead of recursing
		for(int j=0;j<per && err;j++){
			const npy_intp full = (Ns_full - s - 1)*n_vec;
			err = update_out_dense(c,sign,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
}

// Same expansion as get_vec_rep, but for a particle-conserving sector:
// the destination row is located by binary_search over basis_pcon
// rather than computed arithmetically from the state value.
template<class I,class T>
bool get_vec_rep_pcon(general_basis_core<I> *B, I s, int &sign, const int nt, const npy_intp n_vec, const I basis_pcon[], const npy_intp Ns_full, const T in[], std::complex<double> c, T out[], const int depth)
{
	bool err = true;
	if(nt<=0){
		const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
		err = update_out_dense(c,sign,n_vec,in,&out[full]);
		return err;
	}
	int per = B->pers[depth];
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));
	if(depth < nt-1){
		for(int j=0;j<per && err;j++){
			err = get_vec_rep_pcon(B,s,sign,nt,n_vec,basis_pcon,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
	else{
		for(int j=0;j<per && err;j++){
			const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
			err = update_out_dense(c,sign,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
}

// Transform Ns symmetry-reduced vectors (rows of 'in', n_vec columns) into
// the full particle-conserving basis, writing into the dense 'out'.
// Normalization 1/sqrt(n[k]*prod(pers)) uses the per-state norm n[k].
// NOTE(review): 'err' is read without synchronization inside the parallel
// loop and only the false-assignment is guarded by the critical section;
// this is a benign "early bail-out" race, but worth confirming intent.
template<class I,class J,class T>
bool get_vec_general_pcon_dense(general_basis_core<I> *B, const I basis[], const J n[], const npy_intp n_vec, const npy_intp Ns, const npy_intp Ns_full, const I basis_pcon[], const T in[], T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}
	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		int sign = 1;
		bool local_err = get_vec_rep_pcon(B,basis[k],sign,nt,n_vec,basis_pcon,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}
	return err;
}

// As above, but for the full (non-particle-conserving) basis layout,
// delegating the per-state expansion to get_vec_rep.
template<class I,class J,class T>
bool get_vec_general_dense(general_basis_core<I> *B, const I basis[], const J n[], const npy_intp n_vec, const npy_intp Ns, const npy_intp Ns_full, const T in[], T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}
	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		int sign = 1;
		bool local_err = get_vec_rep(B,basis[k],sign,nt,n_vec,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}
	return err;
}

}

#endif
quicksort.h
// -*- C++ -*-

// Copyright (C) 2007, 2008 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING.  If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.

// As a special exception, you may use this file as part of a free
// software library without restriction.  Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License.  This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.

/** @file parallel/quicksort.h
 *  @brief Implementation of a unbalanced parallel quicksort (in-place).
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1

#include <parallel/parallel.h>
#include <parallel/partition.h>

namespace __gnu_parallel
{
  /** @brief Unbalanced quicksort divide step.
   *  @param begin Begin iterator of subsequence.
   *  @param end End iterator of subsequence.
   *  @param comp Comparator.
   *  @param pivot_rank Desired rank of the pivot.
   *  @param num_samples Choose pivot from that many samples.
   *  @param num_threads Number of threads that are allowed to work on
   *  this part.
   *  @return Offset of the partition split point from @c begin.
   */
  template<typename RandomAccessIterator, typename Comparator>
    typename std::iterator_traits<RandomAccessIterator>::difference_type
    parallel_sort_qs_divide(RandomAccessIterator begin,
                            RandomAccessIterator end,
                            Comparator comp,
                            typename std::iterator_traits
                            <RandomAccessIterator>::difference_type pivot_rank,
                            typename std::iterator_traits
                            <RandomAccessIterator>::difference_type
                            num_samples,
                            thread_index_t num_threads)
    {
      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      difference_type n = end - begin;
      num_samples = std::min(num_samples, n);

      // Allocate uninitialized, to avoid default constructor.
      value_type* samples =
        static_cast<value_type*>(::operator new(num_samples
                                                * sizeof(value_type)));

      // Draw evenly spaced samples; widened arithmetic avoids overflow
      // of s * n for large sequences.
      for (difference_type s = 0; s < num_samples; ++s)
        {
          const unsigned long long index = static_cast<unsigned long long>(s)
            * n / num_samples;
          ::new(&(samples[s])) value_type(begin[index]);
        }

      __gnu_sequential::sort(samples, samples + num_samples, comp);

      // Pick the sample whose rank among the samples matches the
      // requested pivot rank in the whole sequence.
      value_type& pivot = samples[pivot_rank * num_samples / n];

      __gnu_parallel::binder2nd<Comparator, value_type, value_type, bool>
        pred(comp, pivot);
      difference_type split = parallel_partition(begin, end,
                                                 pred, num_threads);

      // NOTE(review): sample elements are deallocated without invoking
      // their destructors -- fine for trivially destructible value types;
      // confirm for types owning resources.
      ::operator delete(samples);

      return split;
    }

  /** @brief Unbalanced quicksort conquer step.
   *  @param begin Begin iterator of subsequence.
   *  @param end End iterator of subsequence.
   *  @param comp Comparator.
   *  @param num_threads Number of threads that are allowed to work on
   *  this part.
   *
   *  Recurses on both halves with the thread budget split in
   *  proportion to the pivot rank; falls back to sequential sort
   *  once only one thread remains.
   */
  template<typename RandomAccessIterator, typename Comparator>
    void
    parallel_sort_qs_conquer(RandomAccessIterator begin,
                             RandomAccessIterator end,
                             Comparator comp,
                             thread_index_t num_threads)
    {
      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      if (num_threads <= 1)
        {
          __gnu_sequential::sort(begin, end, comp);
          return;
        }

      difference_type n = end - begin, pivot_rank;

      if (n <= 1)
        return;

      thread_index_t num_threads_left;
      // Odd thread counts give the extra thread to the left side.
      if ((num_threads % 2) == 1)
        num_threads_left = num_threads / 2 + 1;
      else
        num_threads_left = num_threads / 2;

      // Aim the pivot so each side's share of elements matches its
      // share of threads.
      pivot_rank = n * num_threads_left / num_threads;

      difference_type split =
        parallel_sort_qs_divide(begin, end, comp, pivot_rank,
                                _Settings::get().sort_qs_num_samples_preset,
                                num_threads);

#pragma omp parallel sections num_threads(2)
      {
#pragma omp section
        parallel_sort_qs_conquer(begin, begin + split,
                                 comp, num_threads_left);
#pragma omp section
        parallel_sort_qs_conquer(begin + split, end,
                                 comp, num_threads - num_threads_left);
      }
    }

  /** @brief Unbalanced quicksort main call.
   *  @param begin Begin iterator of input sequence.
   *  @param end End iterator input sequence, ignored.
   *  @param comp Comparator.
   *  @param n Length of input sequence.
   *  @param num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename RandomAccessIterator, typename Comparator>
    void
    parallel_sort_qs(RandomAccessIterator begin,
                     RandomAccessIterator end,
                     Comparator comp,
                     typename std::iterator_traits
                     <RandomAccessIterator>::difference_type n,
                     int num_threads)
    {
      _GLIBCXX_CALL(n)

      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      if (n == 0)
        return;

      // At least one element per processor.
      if (num_threads > n)
        num_threads = static_cast<thread_index_t>(n);

      parallel_sort_qs_conquer(begin, begin + n, comp, num_threads);
    }

} //namespace __gnu_parallel

#endif
omp_parallel_for_reduction.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */

/*
 * Exercise every reduction operator of '#pragma omp parallel for'
 * (+, -, *, &&, ||, &, |, ^) with schedule(dynamic,1) and compare
 * each parallel result against a sequentially computed reference.
 * Returns 1 on success, 0 if any sub-test failed.
 */
int test_omp_parallel_for_reduction()
{
  int sum;
  int known_sum;
  double dsum;
  double dknown_sum;
  double dt=0.5; /* base of geometric row for + and - test*/
  double rounding_error= 1.E-9;
  int diff;
  double ddiff;
  int product;
  int known_product;
  int logic_and;
  int logic_or;
  int bit_and;
  int bit_or;
  int exclusiv_bit_or;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result;

  sum =0;
  dsum=0;
  dt = 1./3.;
  result = 0;
  product = 1;
  logic_and=1;
  logic_or=0;
  bit_and=1;
  bit_or=0;
  exclusiv_bit_or=0;

  /* Tests for integers: sum 1..LOOPCOUNT via reduction(+) against
     the closed-form Gauss sum. */
  known_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)
  for (i=1;i<=LOOPCOUNT;i++) {
    sum=sum+i;
  }
  if(known_sum!=sum) {
    result++;
    fprintf(stderr,"Error in sum with integers: Result was %d"
      " instead of %d\n",sum,known_sum);
  }

  /* reduction(-): subtracting 1..LOOPCOUNT from the Gauss sum must
     yield exactly zero. */
  diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)
  for (i=1;i<=LOOPCOUNT;++i) {
    diff=diff-i;
  }
  if(diff != 0) {
    result++;
    fprintf(stderr,"Error in difference with integers: Result was %d"
      " instead of 0.\n",diff);
  }

  /* Tests for doubles: geometric series sum compared within a
     fixed rounding tolerance. */
  dsum=0;
  dpt=1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt*=dt;
  }
  dknown_sum = (1-dpt)/(1-dt);
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dsum += pow(dt,i);
  }
  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f"
      " instead of %f (Difference: %E)\n",
      dsum, dknown_sum, dsum-dknown_sum);
  }

  dpt=1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt*=dt;
  }
  /* NOTE(review): stray blank line on stderr -- looks like leftover
     debug output; harmless for the test result. */
  fprintf(stderr,"\n");
  ddiff = (1-dpt)/(1-dt);
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    ddiff -= pow(dt,i);
  }
  if( fabs(ddiff) > rounding_error) {
    result++;
    fprintf(stderr,"Error in Difference with doubles: Result was %E"
      " instead of 0.0\n",ddiff);
  }

  /* Tests for integers: product reduction, 10! */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)
  for(i=1;i<=MAX_FACTOR;i++) {
    product *= i;
  }
  known_product = KNOWN_PRODUCT;
  if(known_product != product) {
    result++;
    fprintf(stderr,"Error in Product with integers: Result was %d"
      " instead of %d\n\n",product,known_product);
  }

  /* Tests for logic AND: part 1 all-ones (result must be true),
     part 2 with one zero planted (result must be false). */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=1;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(&&:logic_and)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_and = (logic_and && logics[i]);
  }
  if(!logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 1.\n");
  }

  logic_and = 1;
  logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(&&:logic_and)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_and = logic_and && logics[i];
  }
  if(logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 2.\n");
  }

  /* Tests for logic OR: part 1 all-zeros (false), part 2 with one
     one planted (true). */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(||:logic_or)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if(logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 1.\n");
  }

  logic_or = 0;
  logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(||:logic_or)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if(!logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 2.\n");
  }

  /* Tests for bitwise AND: same all-ones / one-zero scheme as the
     logical AND tests above. */
  for(i=0;i<LOOPCOUNT;++i) {
    logics[i]=1;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(&:bit_and)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_and = (bit_and & logics[i]);
  }
  if(!bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 1.\n");
  }

  bit_and = 1;
  logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(&:bit_and)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_and = bit_and & logics[i];
  }
  if(bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 2.\n");
  }

  /* Tests for bitwise OR */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(|:bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if(bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 1\n");
  }

  bit_or = 0;
  logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(|:bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if(!bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 2\n");
  }

  /* Tests for bitwise XOR: all-zeros folds to 0; a single planted 1
     makes the XOR of the whole array 1. */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(^:exclusiv_bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if(exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
  }

  exclusiv_bit_or = 0;
  logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) \
  reduction(^:exclusiv_bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if(!exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
  }

  /*printf("\nResult:%d\n",result);*/
  return (result==0);
}

/* Repeat the whole suite REPETITIONS times; exit status is the number
   of failed repetitions (0 == success). */
int main()
{
  int i;
  int num_failed=0;

  for(i = 0; i < REPETITIONS; i++) {
    if(!test_omp_parallel_for_reduction()) {
      num_failed++;
    }
  }
  return num_failed;
}
sindex_query_old.h
#include <assert.h> #include <math.h> #include "helpers.h" #include "globals.h" #include <iostream> #include <algorithm> #include <chrono> #include <immintrin.h> #ifndef _SINDEXQUERYOLD_ #define _SINDEXQUERYOLD_ inline uint32_t query_range( ky_t* dev_key, ky_t &key, ky_t* keys, uint32_t query_start, uint32_t query_end, uint32_t left, uint32_t mid, uint32_t right, ky_t* query_buffer, uint32_t querysize, int_t* dev_pos, int_t &hst_pos) { QueryStatus result; do { assert(cudaMemcpy(query_buffer, keys + mid, querysize * sizeof(ky_t), cudaMemcpyHostToDevice) == cudaSuccess); assert(cudaMemcpy(dev_pos, &int_max, sizeof(int_t), cudaMemcpyHostToDevice) == cudaSuccess); // run kernel for mid buffer in the mean time if (right != UINT32_MAX) { querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE; } else { querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE; } query_kernel <<<get_block_num(querysize), BLOCKSIZE, BLOCKSIZE / 32 * sizeof(int_t)>>> (dev_key, query_buffer, querysize, dev_pos); assert(cudaGetLastError() == cudaSuccess); // get result from mid buffer assert(cudaMemcpy(&hst_pos, dev_pos, sizeof(int_t), cudaMemcpyDeviceToHost) == cudaSuccess); // evaluate result if (hst_pos != UINT32_MAX) { if (memcmp(&key, keys + mid + hst_pos, sizeof(ky_t)) == 0) { result = found_target; return mid + hst_pos; } else if (hst_pos > 0) { if (memcmp(&key, keys + mid + hst_pos, sizeof(ky_t)) < 0) { --hst_pos; } result = found_target; return mid + hst_pos; } else { result = target_left; } } else { result = target_right; } if (result != found_target) { switch (result) { case target_left: query_end = mid; mid = left; break; case target_right: query_start = mid + QUERYSIZE; mid = right; break; } if (query_end - query_start <= QUERYSIZE) { if (mid > query_start) { left = query_start; } else { left = UINT32_MAX; } right = UINT32_MAX; } else if (query_end - query_start <= 2 * QUERYSIZE) { if (query_start < mid) { left = query_start; } else { left = UINT32_MAX; } if 
(query_start + QUERYSIZE < mid) { right = query_start + QUERYSIZE; } else { right = UINT32_MAX; } } else { left = (mid + query_start - QUERYSIZE) / 2; right = (query_end + mid + QUERYSIZE) / 2; } } } while (result != found_target); } inline uint32_t get_position_from_group( const group_t* group, ky_t &key, ky_t* keys, ky_t* dev_key, int_t* dev_pos, ky_t* query_buffer) { // determine query range fp_t prediction = 0; for (ky_size_t feat_i = 0; feat_i < group->n + 1; ++feat_i) { if (feat_i == group->n) { prediction += *(group->weights + feat_i); } else { ky_size_t char_i = *(group->feat_indices + feat_i); ch_t character = *(((ch_t*) key) + char_i); fp_t weight = *(group->weights + feat_i); prediction += weight * ((fp_t) character); } } uint32_t query_start; uint32_t query_end; uint32_t left; uint32_t right; uint32_t mid; uint32_t querysize; // shift query borders if ((int64_t) (prediction - group->left_err) - 1 < (int64_t) group->start || prediction - group->left_err - 1 < 0) { query_start = group->start; } else if ((int64_t) (prediction - group->left_err) - 1 > (int64_t) (group->start + group->m)) { return group->start + group->m - 1; } else { query_start = (uint32_t) (prediction - group->left_err) - 1; } if ((int64_t) ceil(prediction - group->right_err) + 1 < (int64_t) group->start || ceil(prediction - group->right_err) + 1 < 0) { return group->start; } else if ((int64_t) ceil(prediction - group->right_err) + 1 > (int64_t) (group->start + group->m)) { query_end = group->start + group->m; } else { query_end = ceil(prediction - group->right_err) + 1; } int_t hst_pos; if (query_start == query_end - 1) { hst_pos = query_start; } else { if (prediction < group->start) { prediction = group->start; } else if (prediction >= group->start + group->m) { prediction = group->start + group->m - 1; } // kernel indices if (query_end - query_start <= QUERYSIZE) { left = UINT32_MAX; right = UINT32_MAX; mid = query_start; } else if (query_end - query_start <= 2 * QUERYSIZE) { if 
(prediction < query_start + QUERYSIZE) { left = UINT32_MAX; mid = query_start; right = query_start + QUERYSIZE; } else { left = query_start; mid = query_end - QUERYSIZE; right = UINT32_MAX; } } else { if (prediction - query_start < 0.5 * QUERYSIZE) { left = UINT32_MAX; mid = query_start; right = (query_end + mid + QUERYSIZE) / 2; } else if (query_end - prediction < 0.5 * QUERYSIZE) { right = UINT32_MAX; mid = query_end - QUERYSIZE - 1; left = (mid + query_start - QUERYSIZE) / 2; } else { mid = (uint32_t) (prediction - QUERYSIZE / 2); querysize = (mid - query_start < QUERYSIZE) ? mid - query_start : QUERYSIZE; left = (mid + query_start - querysize) / 2; querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE; right = (query_end + mid + querysize) / 2; } } if (right != UINT32_MAX) { querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE; } else { querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE; } hst_pos = query_range( dev_key, key, keys, query_start, query_end, left, mid, right, query_buffer, querysize, dev_pos, hst_pos ); } return hst_pos; } inline uint32_t get_position_from_index( const index_t* index, ky_t &key, ky_t* keys, ky_t* dev_key, int_t* dev_pos, ky_t* query_buffer) { int_t hst_pos = UINT32_MAX; assert(cudaMemcpy(dev_key, &key, sizeof(ky_t), cudaMemcpyHostToDevice) == cudaSuccess); uint32_t query_start; uint32_t query_end; uint32_t left; uint32_t right; uint32_t mid; uint32_t querysize; if (index->root_n > 0) { query_start = 0; query_end = index->root_n; if (query_start == query_end - 1) { hst_pos = query_start; } else { // kernel indices if (query_end - query_start <= QUERYSIZE) { left = UINT32_MAX; right = UINT32_MAX; mid = query_start; } else if (query_end - query_start <= 2 * QUERYSIZE) { left = UINT32_MAX; mid = query_start; right = query_end - QUERYSIZE; } else { mid = (uint32_t) (query_end - query_start - QUERYSIZE / 2); left = (mid + query_start - QUERYSIZE) / 2; right = (query_end + mid + 
QUERYSIZE) / 2; } if (right != UINT32_MAX) { querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE; } else { querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE; } hst_pos = query_range( dev_key, key, index->root_pivots, query_start, query_end, left, mid, right, query_buffer, querysize, dev_pos, hst_pos ); } } else { hst_pos = 0; } group_t* root = index->roots + hst_pos; hst_pos = get_position_from_group( root, key, index->group_pivots, dev_key, dev_pos, query_buffer ); group_t* group = index->groups + hst_pos; hst_pos = get_position_from_group( group, key, keys, dev_key, dev_pos,query_buffer ); return hst_pos; } ///////////////////////////// ///////////////////////////// inline uint32_t search(ky_t* key, ky_t* keys, uint32_t query_start, uint32_t query_end) { uint32_t pos = query_start; assert(query_start <= query_end); assert(query_end - query_start <= CPUCORES); //#pragma omp parallel for num_threads(query_end - query_start) reduction(max:pos) for (uint32_t thread_i = query_start; thread_i < query_end; ++thread_i) { if (memcmp(keys + thread_i, key, sizeof(ky_t)) <= 0) { pos = std::max(pos, thread_i); } } return pos; } inline uint32_t exponential( ky_t *key, ky_t* keys, uint32_t query_start, uint32_t query_end) { uint32_t pos; uint32_t exponent = 0; uint32_t boundary_exponent = 0; uint32_t boundary = query_start; bool finished = false; while (!finished) { //#pragma omp parallel for num_threads(CPUCORES) reduction(max:boundary_exponent) for (uint32_t thread_i = 0; thread_i < CPUCORES; ++thread_i) { int64_t index; int64_t cmp; if (query_start < query_end) { index = query_start + pow(2, exponent + thread_i); if (index < query_end) { cmp = memcmp(keys + index, key, sizeof(ky_t)); } else { cmp = 1; } } else { index = query_start - pow(2, exponent + thread_i); if (index > query_end) { cmp = memcmp(key, keys + index, sizeof(ky_t)); } else { cmp = 1; } } if (cmp <= 0) { boundary_exponent = std::max(exponent + thread_i, boundary_exponent); } 
else { finished = true; } } exponent += CPUCORES; } uint32_t power = pow(2, boundary_exponent + 1); if (query_start > query_end) { boundary -= power; if (boundary < query_end) { boundary = query_end; } } else { boundary += power; if (boundary > query_end) { boundary = query_end; } } return boundary; } inline uint32_t binary( ky_t* key, ky_t* keys, uint32_t query_start, uint32_t query_end) { assert(query_start <= query_end); uint32_t pos = query_start; while(query_start + CPUCORES < query_end) { uint32_t interval_len = safe_division(query_end - query_start, CPUCORES); #pragma omp parallel for num_threads(CPUCORES) reduction(max:pos) for (uint32_t thread_i = 0; thread_i < CPUCORES; ++thread_i) { uint32_t index = query_start + thread_i * interval_len; if (memcmp(keys + index, key, sizeof(ky_t)) <= 0) { pos = std::max(pos, index); } } query_start = pos; query_end = query_start + interval_len; } pos = search(key, keys, query_start, query_end); return pos; } inline fp_t predict256( ky_t* key, ky_size_t* feat_indices, fp_t* weights, ky_size_t n) { // debug fp_t prediction2 = 0; for (ky_size_t feat_i = 0; feat_i < n + 1; ++feat_i) { if (feat_i == n) { prediction2 += *(weights + feat_i); } else { ky_size_t char_i = *(feat_indices + feat_i); ch_t character = *(((ch_t*) key) + char_i); fp_t weight = *(weights + feat_i); prediction2 += weight * ((fp_t) character); } } // copy key values into double array fp_t key_vals[n]; //#pragma omp parallel for num_threads(CPUCORES) for (ky_size_t feat_i = 0; feat_i < n; ++feat_i) { *(key_vals + feat_i) = *(((ch_t*) key) + *(feat_indices + feat_i)); } fp_t prediction = 0; // check if registers are necessary if (n < 4) { //#pragma omp parallel for num_threads(3) reduction(+:prediction) for (ky_size_t feat_i = 0; feat_i < n; ++feat_i) { prediction += *(key_vals + feat_i) * *(weights + feat_i); } } else { __m256d K[CPUCORES]; __m256d W[CPUCORES]; __m256d S = _mm256_setzero_pd(); // n divided by 4 //#pragma omp parallel for 
num_threads(CPUCORES) for (ky_size_t vector_i = 0; vector_i < (n >> 2); ++vector_i) { // copy to double array // load registers *(K + vector_i) = _mm256_loadu_pd(key_vals + 4 * vector_i); *(W + vector_i) = _mm256_loadu_pd(weights + 4 * vector_i); // fused multiply add S = _mm256_fmadd_pd(*(K + vector_i), *(W + vector_i), S); } S = _mm256_hadd_pd(S, S); prediction += *((fp_t*) &S) + *(((fp_t*) &S) + 2); //#pragma omp parallel for num_threads(3) reduction(+:prediction) for (size_t feat_i = (n & (~3)); feat_i < n; ++feat_i) { prediction += *(key_vals + feat_i) * *(weights + feat_i); } } // add y-shift prediction += *(weights + n); return prediction; } inline uint32_t query_group(ky_t* key, group_t* group, ky_t* keys) { uint32_t pos; // calculate prediction fp_t prediction = predict256(key, group->feat_indices, group->weights, group->n); // set boundaries by error // boundaries are last possible indices ! // int64_t left_boundary = floor(prediction - group->left_err) - 1; // int64_t right_boundary = ceil (prediction - group->right_err) + 2; // // shift boundaries into group // if (left_boundary < group->start) { // left_boundary = group->start; // } else if (left_boundary > group->start + group->m - 1) { // left_boundary = group->start + group->m - 1; // } // if (right_boundary < group->start) { // right_boundary = group->start; // } else if (right_boundary > group->start + group->m - 1) { // right_boundary = group->start + group->m - 1; // } // // // get result if boundaries are small // if (right_boundary - left_boundary < CPUCORES) { // return search(key, keys, left_boundary, right_boundary + 1); // } uint32_t left_boundary = group->start; uint32_t right_boundary = group->start + group->m - 1; // query start is first element int64_t query_start = round(prediction) - CPUCORES / 2; // shift query start if (query_start < left_boundary) { query_start = left_boundary; } else if (query_start > right_boundary + 1) { query_start = right_boundary + 1; } // query end is last 
element not in query int64_t query_end = query_start + CPUCORES; // shift query end if (query_end < left_boundary) { query_end = left_boundary; } else if (query_end > right_boundary + 1) { query_end = right_boundary + 1; } // search around prediction pos = search(key, keys, query_start, query_end); // position found if (pos > query_start && pos < query_end - 1 || memcmp(key, keys + pos, sizeof(ky_t)) == 0 || pos == right_boundary || pos < right_boundary && memcmp(key, keys + pos, sizeof(ky_t)) > 0 && memcmp(key, keys + pos + 1, sizeof(ky_t)) < 0) { return pos; } uint32_t boundary; // determine range and direction if (pos == query_start) { // search left --query_start; boundary = left_boundary; } else if (pos == query_start + CPUCORES - 1) { // search right query_start = query_start + CPUCORES; boundary = right_boundary; } if (abs(boundary - query_start) < CPUCORES) { if (query_start < boundary) { pos = search(key, keys, query_start, boundary + 1); } else { pos = search(key, keys, boundary, query_start + 1); } return pos; } boundary = exponential(key, keys, query_start, boundary); if (query_start < boundary) { pos = binary(key, keys, query_start, boundary + 1); } else { pos = binary(key, keys, boundary, query_start + 1); } return pos; } inline uint32_t get_position_from_index2(const index_t* index, ky_t* key, ky_t* keys) { uint32_t pos; pos = binary(key, index->root_pivots, 0, index->root_n); group_t* root_i = ((group_t*) index->roots) + pos; pos = query_group(key, root_i, index->group_pivots); group_t* group_i = ((group_t*) index->groups) + pos; pos = query_group(key, group_i, keys); return pos; } #endif // _SINDEXQUERYOLD_
// ===== PhysicalSystemFEM.h =====
//Design Notes:
//Physical System:
//MultiVector<ElementTypes>
//MultiVector<DOFType>
//Store DOFs in continguous memory
//Each element
//    Quadrature
//    Energy(ShapeFunction, Energy, position) -> Energy(ShapeFunction(position));
//    ShapeFunction + Kinematics (DOF + gradients I might need ....)
//    Gradient
//    Hessian (maybe, I'm not sure)
// How to represent DOFS ? Pointers to list from Physical System ? Has to be, since elements like forces are connections
//    Energy
//    //Gradient
//    //Hessian
//How should lists of DOFS work ?
//    Container<DOFs>
#ifndef PHYSICALSYSTEMFEM_H
#define PHYSICALSYSTEMFEM_H

#include <vector>
#include <DOFParticle.h>
#include <DOFList.h>
#include <UtilitiesEigen.h>

namespace Gauss {
    namespace FEM {

        // Finite-element physical system over a mesh (V = vertex positions,
        // F = element connectivity). Owns per-vertex position DOFs (m_q) and
        // velocity DOFs (m_qDot) and a list of heap-allocated ElementType
        // objects that hold pointers into those DOF lists. Energies, forces
        // and matrices are accumulated by delegating to each element.
        template<typename DataType, typename ElementType>
        class PhysicalSystemFEMImpl {

        public:

            //temporary global indices until I update the state to give these to me
            //automatically
            // Builds one ElementType per row of F; each element receives the
            // addresses of the DOFs of its incident vertices.
            // NOTE(review): elements are new'd here but never deleted in the
            // destructor — they leak unless ownership is transferred elsewhere;
            // confirm against the rest of the library.
            PhysicalSystemFEMImpl(const Eigen::Ref<Eigen::MatrixXd> &V, const Eigen::Ref<Eigen::MatrixXi> &F) : m_q(V.rows()), m_qDot(V.rows()) {

                m_V = V;
                m_F = F;

                m_numVerts = m_V.rows();
                m_numElements = m_F.rows();

                assert(m_V.cols() == 3); //3D only for now

                //initialize all the elements
                Eigen::MatrixXi Felement;

                std::array<DOFBase<DataType,0> *, ElementType::numDOFs()> qDOFArray;
                std::array<DOFBase<DataType,1> *, ElementType::numDOFs()> qDotDOFArray;

                for(unsigned int iel=0; iel < m_numElements; iel++) {

                    // gather pointers to this element's DOFs
                    for(unsigned int idof=0;idof < ElementType::numDOFs(); ++idof) {
                        qDOFArray[idof] = &m_q[F(iel,idof)];
                        qDotDOFArray[idof] = &m_qDot[F(iel,idof)];
                    }

                    Felement = m_F.row(iel);
                    m_elements.push_back( new ElementType(m_V,Felement, qDOFArray, qDotDOFArray) );
                }
            }

            ~PhysicalSystemFEMImpl() { }

            // Total energy = sum over elements (each element defines getEnergy).
            DataType getEnergy(const State<DataType> &state) const {
                double energy = 0.0;
                for(auto &element : m_elements) {
                    energy += element->getEnergy(state);
                }
                return energy;
            }

            // Total kinetic energy, summed serially over elements.
            DataType getKineticEnergy(const State<DataType> &state) const {
                double energy = 0.0;
                for(auto &element : m_elements) {
                    energy += element->getKineticEnergy(state);
                }
                return energy;
            }

            // Total body-force work. OpenMP reduction on non-Windows builds;
            // serial loop on Windows (MSVC's OpenMP support differs).
            DataType getBodyForceEnergy(const State<DataType> &state) const {
                DataType energy = 0.0;
#if defined(_WIN32) || defined(_WIN64) || defined (WIN32)
                for(auto &element : m_elements) {
                    energy += element->getBodyForceWork(state);
                }
#else
                #pragma omp parallel for reduction(+: energy)
                for(unsigned int ii=0; ii<m_elements.size(); ++ii) {
                    energy = energy + m_elements[ii]->getBodyForceWork(state);
                }
#endif
                return energy;
            }

            // Total strain energy, with the same Windows/OpenMP split as above.
            DataType getStrainEnergy(const State<DataType> &state) const {
                DataType energy = 0.0;
#if defined(_WIN32) || defined(_WIN64) || defined (WIN32)
                for(auto &element : m_elements) {
                    energy += element->getStrainEnergy(state);
                }
#else
                #pragma omp parallel for reduction(+: energy)
                for(unsigned int ii=0; ii<m_elements.size(); ++ii) {
                    energy = energy + m_elements[ii]->getStrainEnergy(state);
                }
#endif
                return energy;
            }

            // Per-element strain energies as a vector (index = element index).
            decltype(auto) getStrainEnergyPerElement(const State<DataType> &state) const {
                Eigen::VectorXx<DataType> energyPerElement(m_elements.size());

                for(int i=0; i < m_elements.size(); i++) {
                    energyPerElement[i] = m_elements[i]->getStrainEnergy(state);
                }

                return energyPerElement;
            }

            // Assemble the mass matrix; forLoop dispatches serially or in
            // parallel depending on the assembler's IsParallel trait.
            template<typename Assembler>
            inline void getMassMatrix(Assembler &assembler, const State<DataType> &state) const {

                //call the assembler on all elements
                forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
                    element->getMassMatrix(assemble,state);
                });

            }

            // Assemble the stiffness matrix from every element.
            template<typename Assembler>
            inline void getStiffnessMatrix(Assembler &assembler, const State<DataType> &state) const {

                forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
                    element->getStiffnessMatrix(assemble, state);
                });

            }

            // Assemble total (internal + body) forces from every element.
            template<typename Assembler>
            inline void getForce(Assembler &assembler, const State<DataType> &state) const {

                forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
                    element->getForce(assemble, state);
                });
            }

            // Assemble internal (strain-derived) forces only.
            template<typename Assembler>
            inline void getInternalForce(Assembler &assembler, const State<DataType> &state) const {

                forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
                    element->getInternalForce(assemble, state);
                });
            }

            // Assemble external body forces only.
            template<typename Assembler>
            inline void getBodyForce(Assembler &assembler, const State<DataType> &state) const {

                forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
                    element->getBodyForce(assemble, state);
                });
            }

            inline unsigned int getNumElements() { return m_elements.size(); }

            inline ElementType * getElement(unsigned int i) {
                assert(i < m_elements.size());
                return m_elements[i];
            }

            inline std::vector<ElementType *> & getElements() { return m_elements; }

            inline const std::vector<ElementType *> & getElements() const { return m_elements; }

            inline const ElementType * getElement(unsigned int i) const {
                assert(i < m_elements.size());
                return m_elements[i];
            }

            inline auto & getQ() { return m_q; }
            inline const auto & getQ() const { return m_q; }

            inline auto & getQDot() { return m_qDot; }
            inline const auto & getQDot() const { return m_qDot; }

            //get function supporting a vertex (these return arrays in order to slot directly into assemblers)
            inline decltype(auto) getQ(unsigned int vertexId) const {
                std::array<const DOFBase<DataType,0> *,1> toReturn = {{&m_q[vertexId]}};
                return toReturn;
            }

            inline decltype(auto) getQDot(unsigned int vertexId) const {
                std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[vertexId]}};
                return toReturn;
            }

            // Spatial-point overloads: intentionally unimplemented — abort at
            // runtime if called.
            template<typename Vector>
            inline decltype(auto) getQ(Vector &x, unsigned int elementId) const {
                std::cout<<"Error not implemented \n";
                exit(0);
                std::array<const DOFBase<DataType,0> *, 1> toReturn = {{&m_q[elementId]}};
                return toReturn;
            }

            template<typename Vector>
            inline decltype(auto) getQDot(Vector &x, unsigned int elementId) const {
                std::cout<<"Error not implemented \n";
                exit(0);
                std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[elementId]}};
                return toReturn;
            }

            inline auto & getV() { return m_V; }
            inline auto & getF() { return m_F; }

            inline const auto & getV() const { return m_V; }
            inline const auto & getF() const { return m_F; }

            //methods for getting current positions and position Jacobians for this system
            //Per-Vertex
            // deformed position = rest position + displacement DOF
            inline const auto getPosition(const State<DataType> &state, unsigned int vertexId) const {
                return getV().row(vertexId).transpose() + mapDOFEigen(m_q[vertexId], state);
            }

            inline const auto getVelocity(const State<DataType> &state, unsigned int vertexId) const {
                return mapDOFEigen(m_qDot[vertexId], state);
            }

            // dPosition/dDOF is identity for per-vertex particle DOFs.
            inline const auto getDPDQ(const State<DataType> &state, unsigned int vertexId) const {
                return Eigen::Matrix33x<DataType>::Identity();
            }

            // Element-point Jacobian: unimplemented — aborts if called.
            inline const auto getDPDQ(const State<DataType> &state, unsigned int elementId, const Eigen::Vector3x<DataType> &pos) const {
                exit(0);
                return Eigen::Matrix33x<DataType>::Identity();
            }

            //want these for elements as well (i.e take an element indec and a point in space and return the right value)
            inline auto getGeometry() { return std::make_pair(std::ref(m_V), std::ref(m_F)); }

        protected:

            //Mesh
            Eigen::MatrixXd m_V;
            Eigen::MatrixXi m_F;

            long m_numVerts;
            long m_numElements;

            DOFList<DataType, DOFParticle, 0> m_q;      // per-vertex position DOFs
            DOFList<DataType, DOFParticle, 1> m_qDot;   // per-vertex velocity DOFs

            std::vector<ElementType *> m_elements;      // owned raw pointers (see ctor note)

            //DataType m_mass; //mass of particle

            //DOFParticle<DataType,0> m_x;
            //DOFParticle<DataType,1> m_xDot;

        private:
        };

        // Public alias wrapping the impl in the generic PhysicalSystem shell.
        template<typename DataType, template <typename A> class ElementType>
        using PhysicalSystemFEM = PhysicalSystem<DataType, PhysicalSystemFEMImpl<DataType, ElementType<DataType> > >;
    }
}

#endif
// ===== elemwise_binary_scalar_op.h =====
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2016 by Contributors
 * \file elemwise_binary_scalar_op.h
 * \brief Function definition of elementwise binary scalar operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_

#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"

namespace mxnet {
namespace op {

// Elementwise tensor-op-scalar operators (e.g. x + 3). The scalar is parsed
// into attrs.parsed as a double. Private helpers handle the sparse-input /
// dense-output cases; the public Compute/ComputeEx/Backward entry points
// dispatch on storage type.
class BinaryScalarOp : public UnaryOp {
  /*! \brief Tensor operation against a scalar with a dense result
   *
   * Row-sparse input -> dense output (CPU). Rows absent from the rsp input
   * are filled with OP(0, alpha); present rows get OP(value, alpha). Rows are
   * processed in contiguous dense/sparse runs so each run is one kernel launch.
   */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    const double alpha = nnvm::get<double>(attrs.parsed);
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // value written to every element of a missing (implicitly-zero) row
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
        rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;    // cursor into the stored (sparse) rows
      int64_t output_row = 0;    // cursor into the dense output rows
      IType next_input_row = 0;
      while (output_row < row_count) {
        // next stored row, or row_count when stored rows are exhausted
        next_input_row = input_iter < sparse_row_count
                         ? int64_t(row_indexes[input_iter])
                         : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
              stream,
              items_per_row * dense_block_count,
              output_data.dptr_ + items_per_row * output_row,
              result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1]
              != row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
              stream,
              items_per_row * sparse_block_count,
              &output_data.dptr_[items_per_row * output_row],
              &input_data.dptr_[items_per_row * input_iter],
              DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          stream,
          items_per_row * row_count,
          output_data.dptr_,
          input_data.dptr_,
          DType(alpha));
      });
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result
   *  (GPU rsp->dense: not implemented, aborts) */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Tensor operation against a scalar with a dense result
   *
   * CSR input -> dense output (CPU). The output is pre-filled with
   * OP(0, alpha), then stored values are scattered per row.
   * NOTE(review): the inner "#pragma omp parallel for" is nested inside an
   * already-parallel region; without nested parallelism enabled it runs
   * serially — presumably intentional as a fallback, but worth confirming.
   */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const double alpha = nnvm::get<double>(attrs.parsed);
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req,
                     output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        const size_t input_items_this_row =
          !last_row
          ? static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter
          : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          if (input_items_this_row > 1000) {
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result
   *  (GPU csr->dense: not implemented, aborts) */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  // Dispatch sparse-input/dense-output to the rsp or csr helper.
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx,
                                                           input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  // Dense forward: out[i] = OP(in[i], alpha) over the flat tensor.
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(),
          DType(alpha));
      });
    });
  }

  // Sparse-aware forward: same-storage in/out maps over stored values only;
  // sparse-in/dense-out uses the dense-result helpers; anything else is logged
  // as unimplemented.
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage)
        || (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage
               && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0],
                                                      req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  // Like ComputeEx but only supports same-storage in/out (for logic ops whose
  // zero-output keeps sparsity); no sparse->dense path.
  template<typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<NDArray> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage)
        || (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  // Backward: grad_in = backward_grad_tuned<OP>(grad_out, data, alpha).
  // inputs[0] = upstream gradient, inputs[1] = forward input.
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
          mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
          Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
                 inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha));
      });
    });
  }
};

// Registration boilerplate for a unary op taking a "scalar" attribute;
// the scalar string is parsed to double at attr-parse time.
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser([](NodeAttrs* attrs) {                           \
      attrs->parsed = std::stod(attrs->dict["scalar"]);             \
    })                                                              \
  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)  \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_argument("scalar", "float", "scalar input")

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
run_encap_decap.c
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Encapsulate a secret and use the secret to encrypt a message Decapsulate the secret and use the secret to decrypt the encrypted message */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <amcl/utils.h> #include <amcl/randapi.h> #include <amcl/bls_BLS381.h> #include <oqs/oqs.h> #include <pqnist/pqnist.h> #define NTHREADS 8 #define MAXSIZE 256 #define G2LEN 4*BFS_BLS381 int main() { int i,rc; // Seed value for CSPRNG char seed[PQNIST_SEED_LENGTH]; octet SEED = {sizeof(seed),sizeof(seed),seed}; // Seed value for key generation char seedkeys[NTHREADS][PQNIST_SEED_LENGTH]; csprng RNG; // Initialization vector char iv[PQNIST_AES_IV_LENGTH]; octet IV= {sizeof(iv),sizeof(iv),iv}; // Message to be sent to Bob char p[NTHREADS][MAXSIZE]; octet P[NTHREADS]; // AES CBC ciphertext char c[NTHREADS][MAXSIZE]; octet C[NTHREADS]; // non random seed value for (i=0; i<PQNIST_SEED_LENGTH; i++) SEED.val[i]=i+1; printf("SEED: "); OCT_output(&SEED); printf("\n"); // initialise random number generator CREATE_CSPRNG(&RNG,&SEED); // Initialise key generation seed for(i=0; i<NTHREADS; i++) { for(int j=0; j<PQNIST_SEED_LENGTH; j++) { seedkeys[i][j] = i; } } // Bob's SIKE keys uint8_t 
SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key]; uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key]; // Alice's BLS keys (not used) char BLSpk[NTHREADS][G2LEN]; char BLSsk[NTHREADS][BGS_BLS381]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]); if (rc) { fprintf(stderr, "ERROR pqnist_keys rc: %d\n", rc); exit(EXIT_FAILURE); } int j = OQS_KEM_sike_p751_length_public_key; printf("Bob SIKE pklen %d pk: ", j); amcl_print_hex(SIKEpk[i], j); j = OQS_KEM_sike_p751_length_secret_key; printf("Bob SIKE sklen %d sk: ", j); amcl_print_hex(SIKEsk[i], j); } // Alice for(i=0; i<NTHREADS; i++) { bzero(p[i],sizeof(p[i])); P[i].max = MAXSIZE; P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i); P[i].val = p[i]; // Pad message int l = 16 - (P[i].len % 16); if (l < 16) { OCT_jbyte(&P[i],0,l); } } // Random initialization value generateRandom(&RNG,&IV); printf("Alice IV: "); OCT_output(&IV); // Copy plaintext for(i=0; i<NTHREADS; i++) { C[i].val = c[i]; C[i].max = MAXSIZE; OCT_copy(&C[i],&P[i]); printf("Alice Plaintext: "); OCT_output_string(&C[i]); printf("\n"); } // SIKE encapsulated key uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Generate an AES which is ecapsulated using SIKE. Use this key to // AES encrypt the K parameter. 
rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]); if(rc) { fprintf(stderr, "ERROR pqnist_encapsulate_encrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Alice ciphertext: "); OCT_output(&C[i]); printf("Alice ek %lu ek: ", sizeof(ek[i])); amcl_print_hex(ek[i], sizeof(ek[i])); printf("\n"); } // Bob #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Obtain encapsulated AES key and decrypt C rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]); if(rc) { fprintf(stderr, "ERROR pqnist_decapsulate_decrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Bob Plaintext: "); OCT_output(&C[i]); printf("Bob Plaintext: "); OCT_output_string(&C[i]); printf("\n"); // Compare sent and recieved message (returns 0 for failure) rc = OCT_comp(&P[i],&C[i]); if(!rc) { fprintf(stderr, "ERROR OCT_comp rc: %d\n", rc); exit(EXIT_FAILURE); } } // clear memory OCT_clear(&IV); for(i=0; i<NTHREADS; i++) { OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); OQS_MEM_cleanse(BLSsk[i], OQS_SIG_picnic_L5_FS_length_secret_key); OCT_clear(&P[i]); OCT_clear(&C[i]); } KILL_CSPRNG(&RNG); exit(EXIT_SUCCESS); }
simd5.c
#include <math.h>

/* Store into b[1..n-1] the average of each pair of adjacent elements of
   a[].  The loop iterations are independent, which order(concurrent)
   asserts; m is unused but kept for signature compatibility with the
   surrounding test harness.  (Nonstandard `void main` is preserved for
   the same reason.) */
void main(int n,int m,float *a,float *b)
{
  int i;
/* BUG FIX: the OpenMP `simd` construct must be immediately followed by
   the associated for-loop; wrapping the loop in a compound statement
   `{ ... }` is non-conforming and rejected by conforming compilers. */
#pragma omp simd order(concurrent)
  for (i = 1; i < n; i++)
    b[i] = ((a[i] + a[i - 1]) / 2.0);
}
CSR.h
#ifndef _CSR_H_ #define _CSR_H_ #include "CSC.h" #include "Deleter.h" #include "Triple.h" #include <algorithm> #include <cassert> #include <cstdlib> #include <iostream> #include <random> #include <vector> //#include <tbb/scalable_allocator.h> #include <omp.h> #include "grb_util.h" #include "utility.h" using namespace std; template <class IT, class NT> class CSR { public: CSR() : nnz(0), rows(0), cols(0), zerobased(true) {} CSR(IT mynnz, IT m, IT n) : nnz(mynnz), rows(m), cols(n), zerobased(true) { // Constructing empty Csc objects (size = 0) are allowed (why wouldn't // they?). assert(rows != 0); rowptr = my_malloc<IT>(rows + 1); if (nnz > 0) { colids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); } } CSR(graph &G); CSR(string filename); CSR(const CSC<IT, NT> &csc); // CSC -> CSR conversion CSR(const CSR<IT, NT> &rhs); // copy constructor CSR(const CSC<IT, NT> &csc, const bool transpose); // CSR(const GrB_Matrix &A); // construct from GraphBLAS matrix CSR(GrB_Matrix *A, bool dup_mat = false); // construct from GraphBLAS // matrix pointers CSR(GrB_Matrix A); // construct from GraphBLAS matrix (unpack) CSR<IT, NT> &operator=(const CSR<IT, NT> &rhs); // assignment operator bool operator==(const CSR<IT, NT> &rhs); // ridefinizione == void shuffleIds(); // Randomly permutating column indices void sortIds(); // Permutating column indices in ascending order void make_empty() { if (nnz > 0) { if (colids != NULL) my_free<IT>(colids); if (values != NULL) my_free<NT>(values); nnz = 0; } if (rows > 0) { if (rowptr != NULL) my_free<IT>(rowptr); rows = 0; } cols = 0; } ~CSR() { make_empty(); } bool ConvertOneBased() { if (!zerobased) // already one-based return false; transform(rowptr, rowptr + rows + 1, rowptr, bind2nd(plus<IT>(), static_cast<IT>(1))); transform(colids, colids + nnz, colids, bind2nd(plus<IT>(), static_cast<IT>(1))); zerobased = false; return true; } bool ConvertZeroBased() { if (zerobased) return true; transform(rowptr, rowptr + rows + 1, rowptr, 
bind2nd(plus<IT>(), static_cast<IT>(-1))); transform(colids, colids + nnz, colids, bind2nd(plus<IT>(), static_cast<IT>(-1))); zerobased = true; return false; } bool isEmpty() { return (nnz == 0); } NT sumall() { IT sum = 0; #pragma omp parallel for reduction (+:sum) for(IT i=0; i<nnz; ++i ) { sum += values[i]; } return sum; } void Sorted(); void get_grb_mat(GrB_Matrix *A); void get_grb_mat(GrB_Matrix A); void get_grb_mat_ptr(GrB_Matrix *A); // sets CSR object's pointers to NULL IT rows; IT cols; IT nnz; // number of nonzeros IT *rowptr; IT *colids; NT *values; bool zerobased; }; // copy constructor template <class IT, class NT> CSR<IT, NT>::CSR(const CSR<IT, NT> &rhs) : nnz(rhs.nnz), rows(rhs.rows), cols(rhs.cols), zerobased(rhs.zerobased) { if (nnz > 0) { values = my_malloc<NT>(nnz); colids = my_malloc<IT>(nnz); copy(rhs.values, rhs.values + nnz, values); copy(rhs.colids, rhs.colids + nnz, colids); } if (rows > 0) { rowptr = my_malloc<IT>(rows + 1); copy(rhs.rowptr, rhs.rowptr + rows + 1, rowptr); } } template <class IT, class NT> CSR<IT, NT> &CSR<IT, NT>::operator=(const CSR<IT, NT> &rhs) { if (this != &rhs) { if (nnz > 0) // if the existing object is not empty { my_free<IT>(colids); my_free<NT>(values); } if (rows > 0) { my_free<IT>(rowptr); } nnz = rhs.nnz; rows = rhs.rows; cols = rhs.cols; zerobased = rhs.zerobased; if (rhs.nnz > 0) // if the copied object is not empty { values = my_malloc<NT>(nnz); colids = my_malloc<IT>(nnz); copy(rhs.values, rhs.values + nnz, values); copy(rhs.colids, rhs.colids + nnz, colids); } if (rhs.cols > 0) { rowptr = my_malloc<IT>(rows + 1); copy(rhs.rowptr, rhs.rowptr + rows + 1, rowptr); } } return *this; } //! Construct a CSR object from a CSC //! 
Accepts only zero based CSC inputs template <class IT, class NT> CSR<IT, NT>::CSR(const CSC<IT, NT> &csc) : nnz(csc.nnz), rows(csc.rows), cols(csc.cols), zerobased(true) { rowptr = my_malloc<IT>(rows + 1); colids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); IT *work = my_malloc<IT>(rows); std::fill(work, work + rows, (IT)0); // initilized to zero for (IT k = 0; k < nnz; ++k) { IT tmp = csc.rowids[k]; work[tmp]++; // row counts (i.e, w holds the "row difference array") } IT last; if (nnz > 0) { rowptr[rows] = CumulativeSum(work, rows); // cumulative sum of w copy(work, work + rows, rowptr); for (IT i = 0; i < cols; ++i) { for (IT j = csc.colptr[i]; j < csc.colptr[i + 1]; ++j) { // last = work[csc.rowids[j]]++; // colids[last] = i; colids[last = work[csc.rowids[j]]++] = i; values[last] = csc.values[j]; } } } my_free<IT>(work); } template <class IT, class NT> CSR<IT, NT>::CSR(const CSC<IT, NT> &csc, const bool transpose) : nnz(csc.nnz), rows(csc.rows), cols(csc.cols), zerobased(true) { if (!transpose) { rowptr = my_malloc<IT>(rows + 1); colids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); IT *work = my_malloc<IT>(rows); std::fill(work, work + rows, (IT)0); // initilized to zero for (IT k = 0; k < nnz; ++k) { IT tmp = csc.rowids[k]; work[tmp]++; // row counts (i.e, w holds the "row difference array") } if (nnz > 0) { rowptr[rows] = CumulativeSum(work, rows); // cumulative sum of w copy(work, work + rows, rowptr); IT last; for (IT i = 0; i < cols; ++i) { for (IT j = csc.colptr[i]; j < csc.colptr[i + 1]; ++j) { colids[last = work[csc.rowids[j]]++] = i; values[last] = csc.values[j]; } } } my_free<IT>(work); } else { rows = csc.cols; cols = csc.rows; rowptr = my_malloc<IT>(rows + 1); colids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); for (IT k = 0; k < rows + 1; ++k) { rowptr[k] = csc.colptr[k]; } for (IT k = 0; k < nnz; ++k) { values[k] = csc.values[k]; colids[k] = csc.rowids[k]; } } } template <class IT, class NT> CSR<IT, NT>::CSR(graph &G) : nnz(G.m), 
rows(G.n), cols(G.n), zerobased(true) { // graph is like a triples object // typedef struct { // LONG_T m; // LONG_T n; // // Arrays of size 'm' storing the edge information // // A directed edge 'e' (0 <= e < m) from start[e] to end[e] // // had an integer weight w[e] // LONG_T* start; // LONG_T* end; // WEIGHT_T* w; // } graph; cout << "Graph nnz= " << G.m << " and n=" << G.n << endl; vector<Triple<IT, NT>> simpleG; vector<pair<pair<IT, IT>, NT>> currCol; currCol.push_back(make_pair(make_pair(G.start[0], G.end[0]), G.w[0])); for (IT k = 0; k < nnz - 1; ++k) { if (G.start[k] != G.start[k + 1]) { std::sort(currCol.begin(), currCol.end()); simpleG.push_back(Triple<IT, NT>( currCol[0].first.first, currCol[0].first.second, currCol[0].second)); for (int i = 0; i < currCol.size() - 1; ++i) { if (currCol[i].first == currCol[i + 1].first) { simpleG.back().val += currCol[i + 1].second; } else { simpleG.push_back(Triple<IT, NT>(currCol[i + 1].first.first, currCol[i + 1].first.second, currCol[i + 1].second)); } } vector<pair<pair<IT, IT>, NT>>().swap(currCol); } currCol.push_back( make_pair(make_pair(G.start[k + 1], G.end[k + 1]), G.w[k + 1])); } // now do the last row sort(currCol.begin(), currCol.end()); simpleG.push_back(Triple<IT, NT>(currCol[0].first.first, currCol[0].first.second, currCol[0].second)); for (int i = 0; i < currCol.size() - 1; ++i) { if (currCol[i].first == currCol[i + 1].first) { simpleG.back().val += currCol[i + 1].second; } else { simpleG.push_back(Triple<IT, NT>(currCol[i + 1].first.first, currCol[i + 1].first.second, currCol[i + 1].second)); } } nnz = simpleG.size(); cout << "[After duplicate merging] Graph nnz= " << nnz << " and n=" << G.n << endl; rowptr = my_malloc<IT>(rows + 1); colids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); IT *work = my_malloc<IT>(rows); std::fill(work, work + rows, (IT)0); // initilized to zero for (IT k = 0; k < nnz; ++k) { IT tmp = simpleG[k].row; work[tmp]++; // col counts (i.e, w holds the "col difference 
array") } if (nnz > 0) { rowptr[rows] = CumulativeSum(work, rows); // cumulative sum of w copy(work, work + rows, rowptr); IT last; for (IT k = 0; k < nnz; ++k) { colids[last = work[simpleG[k].row]++] = simpleG[k].col; values[last] = simpleG[k].val; } } my_free<IT>(work); } // template <class IT, // class NT> // CSR<IT, NT>::CSR (const GrB_Matrix &A) : // zerobased(true) // { // GrB_Index nc, nr, nv; // GrB_Matrix_nrows(&nr, A); // GrB_Matrix_ncols(&nc, A); // GrB_Matrix_nvals(&nv, A); // this->rows = static_cast<IT>(nr); // this->cols = static_cast<IT>(nc); // this->nnz = static_cast<IT>(nv); // // need cast from GrB_Index to IT // GrB_Index *rids = new GrB_Index[nv]; // GrB_Index *cids = new GrB_Index[nv]; // this->rowptr = my_malloc<IT>(this->rows+1); // this->colids = my_malloc<IT>(this->nnz); // this->values = my_malloc<NT>(this->nnz); // GrbMatrixExtractTuples<NT>()(rids, cids, this->values, &nv, A); // assert(nv == this->nnz); // // assume sorted and check it while forming // memset(this->rowptr, 0, sizeof(IT) * (this->rows+1)); // GrB_Index last_rid = -1, last_cid = -1; // for (GrB_Index i = 0; i < nv; ++i) // { // assert(rids[i] >= last_rid && // "row ids are not sorted in the GraphBLAS matrix\n"); // if (rids[i] == last_rid) // assert(cids[i] > last_cid && // "col ids are not sorted in the GraphBLAS matrix\n"); // last_rid = rids[i]; // last_cid = cids[i]; // ++this->rowptr[rids[i]+1]; // this->colids[i] = static_cast<IT>(cids[i]); // } // if (this->rows > 0) // std::inclusive_scan(this->rowptr+1, this->rowptr+this->rows+1, // this->rowptr+1); // delete [] rids; // delete [] cids; // } template <class IT, class NT> CSR<IT, NT>::CSR (GrB_Matrix A) : zerobased(true) { static_assert(std::is_same<IT, GrB_Index>::value, "CSR matrix index type and GrB_Matrix index type " "must be the same"); bool is_iso, is_jumbled; GrB_Index ap_size, aj_size, ax_size; GrB_Index nr, nc, nnz; GrB_Descriptor desc = NULL; GrB_Descriptor_new(&desc); GrB_Matrix_nrows(&nr, A); 
GrB_Matrix_ncols(&nc, A); GrB_Matrix_nvals(&nnz, A); this->rows = nr; this->cols = nc; this->nnz = nnz; // does not free the matrix, but the matrix has no entries after this GxB_Matrix_unpack_CSR(A, &this->rowptr, &this->colids, (void **)&this->values, &ap_size, &aj_size, &ax_size, &is_iso, &is_jumbled, desc); assert(!is_iso && "GraphBLAS matrix is iso-valued."); assert(!is_jumbled && "GraphBLAS matrix is not sorted\n"); GrB_Descriptor_free(&desc); return; } template <class IT, class NT> CSR<IT, NT>::CSR (GrB_Matrix *A, bool dup_mat) : zerobased(true) { static_assert(std::is_same<IT, GrB_Index>::value, "CSR matrix index type and GrB_Matrix index type " "must be the same"); GrB_Matrix tmp = *A; // shallow if (dup_mat) GrB_Matrix_dup(&tmp, *A); // deep GrB_Type nz_type; bool is_uniform, is_jumbled; GrB_Index ap_size, aj_size, ax_size; GrB_Descriptor desc = NULL; GrB_Descriptor_new(&desc); GxB_Matrix_type(&nz_type, tmp); GrB_Matrix_nvals(&this->nnz, tmp); GxB_Matrix_export_CSR(&tmp, &nz_type, &this->rows, &this->cols, &this->rowptr, &this->colids, (void **)&this->values, &ap_size, &aj_size, &ax_size, &is_uniform, &is_jumbled, desc); // frees the graphblas matrix assert(!is_jumbled && "GraphBLAS matrix is not sorted\n"); return; } // check if sorted within rows? 
template <class IT, class NT> void CSR<IT, NT>::Sorted() { bool sorted = true; for (IT i = 0; i < rows; ++i) { sorted &= my_is_sorted(colids + rowptr[i], colids + rowptr[i + 1], std::less<IT>()); } cout << "CSR graph is sorted by column id: "<< sorted << endl; } template <class IT, class NT> bool CSR<IT, NT>::operator==(const CSR<IT, NT> &rhs) { bool same; if (nnz != rhs.nnz || rows != rhs.rows || cols != rhs.cols) { printf("%d:%d, %d:%d, %d:%d\n", nnz, rhs.nnz, rows, rhs.rows, cols, rhs.cols); return false; } if (zerobased != rhs.zerobased) { IT *tmp_rowptr = my_malloc<IT>(rows + 1); IT *tmp_colids = my_malloc<IT>(nnz); if (!zerobased) { for (int i = 0; i < rows + 1; ++i) { tmp_rowptr[i] = rowptr[i] - 1; } for (int i = 0; i < nnz; ++i) { tmp_colids[i] = colids[i] - 1; } same = std::equal(tmp_rowptr, tmp_rowptr + rows + 1, rhs.rowptr); same = same && std::equal(tmp_colids, tmp_colids + nnz, rhs.colids); } else if (!rhs.zerobased) { for (int i = 0; i < rows + 1; ++i) { tmp_rowptr[i] = rhs.rowptr[i] - 1; } for (int i = 0; i < nnz; ++i) { tmp_colids[i] = rhs.colids[i] - 1; } same = std::equal(tmp_rowptr, tmp_rowptr + rows + 1, rowptr); same = same && std::equal(tmp_colids, tmp_colids + nnz, colids); } my_free<IT>(tmp_rowptr); my_free<IT>(tmp_colids); } else { same = std::equal(rowptr, rowptr + rows + 1, rhs.rowptr); same = same && std::equal(colids, colids + nnz, rhs.colids); } bool samebefore = same; ErrorTolerantEqual<NT> epsilonequal(EPSILON); same = same && std::equal(values, values + nnz, rhs.values, epsilonequal); if (samebefore && (!same)) { #ifdef DEBUG vector<NT> error(nnz); transform(values, values + nnz, rhs.values, error.begin(), absdiff<NT>()); vector<pair<NT, NT>> error_original_pair(nnz); for (IT i = 0; i < nnz; ++i) error_original_pair[i] = make_pair(error[i], values[i]); if (error_original_pair.size() > 10) { // otherwise would crush for small data partial_sort(error_original_pair.begin(), error_original_pair.begin() + 10, error_original_pair.end(), 
greater<pair<NT, NT>>()); cout << "Highest 10 different entries are: " << endl; for (IT i = 0; i < 10; ++i) cout << "Diff: " << error_original_pair[i].first << " on " << error_original_pair[i].second << endl; } else { sort(error_original_pair.begin(), error_original_pair.end(), greater<pair<NT, NT>>()); cout << "Highest different entries are: " << endl; for (typename vector<pair<NT, NT>>::iterator it = error_original_pair.begin(); it != error_original_pair.end(); ++it) cout << "Diff: " << it->first << " on " << it->second << endl; } #endif } return same; } template <class IT, class NT> CSR<IT, NT>::CSR(const string filename) : zerobased(true) { IT i; bool isUnsy; IT num, offset, tmp_nz; char *line, *ch; FILE *fp; IT *col_coo, *row_coo; NT *val_coo; IT *each_row_index; IT *nnz_num; const int LINE_LENGTH_MAX = 256; isUnsy = false; line = (char *)malloc(sizeof(char) * LINE_LENGTH_MAX); /* Open File */ fp = fopen(filename.c_str(), "r"); if (fp == NULL) { exit(1); } do { fgets(line, LINE_LENGTH_MAX, fp); if (strstr(line, "general")) { isUnsy = true; } } while (line[0] == '%'); /* Get size info */ sscanf(line, "%d %d %d", &rows, &cols, &tmp_nz); /* Store in COO format */ num = 0; col_coo = (IT *)malloc(sizeof(IT) * (tmp_nz)); row_coo = (IT *)malloc(sizeof(IT) * (tmp_nz)); val_coo = (NT *)malloc(sizeof(NT) * (tmp_nz)); while (fgets(line, LINE_LENGTH_MAX, fp)) { ch = line; /* Read first word (row id)*/ row_coo[num] = (IT)(atoi(ch) - 1); ch = strchr(ch, ' '); ch++; /* Read second word (column id)*/ col_coo[num] = (IT)(atoi(ch) - 1); ch = strchr(ch, ' '); if (ch != NULL) { ch++; /* Read third word (value data)*/ val_coo[num] = (NT)atof(ch); ch = strchr(ch, ' '); } else { val_coo[num] = 1.0; } num++; } fclose(fp); /* Count the number of non-zero in each row */ nnz_num = (IT *)malloc(sizeof(IT) * rows); for (i = 0; i < rows; i++) { nnz_num[i] = 0; } for (i = 0; i < num; i++) { nnz_num[row_coo[i]]++; if (col_coo[i] != row_coo[i] && isUnsy == false) { nnz_num[col_coo[i]]++; 
(tmp_nz)++; } } nnz = tmp_nz; /* Allocation of rpt, col, val */ rowptr = my_malloc<IT>(sizeof(IT) * (rows + 1)); colids = my_malloc<IT>(sizeof(IT) * (nnz)); values = my_malloc<NT>(sizeof(NT) * (nnz)); offset = 0; for (i = 0; i < rows; i++) { rowptr[i] = offset; offset += nnz_num[i]; } rowptr[rows] = offset; each_row_index = (IT *)malloc(sizeof(IT) * rows); for (i = 0; i < rows; i++) { each_row_index[i] = 0; } for (i = 0; i < num; i++) { colids[rowptr[row_coo[i]] + each_row_index[row_coo[i]]] = col_coo[i]; values[rowptr[row_coo[i]] + each_row_index[row_coo[i]]++] = val_coo[i]; if (col_coo[i] != row_coo[i] && isUnsy == false) { colids[rowptr[col_coo[i]] + each_row_index[col_coo[i]]] = row_coo[i]; values[rowptr[col_coo[i]] + each_row_index[col_coo[i]]++] = val_coo[i]; } } free(line); free(nnz_num); free(row_coo); free(col_coo); free(val_coo); free(each_row_index); } template <class IT, class NT> void CSR<IT, NT>::shuffleIds() { mt19937_64 mt(0); for (IT i = 0; i < rows; ++i) { IT offset = rowptr[i]; IT width = rowptr[i + 1] - rowptr[i]; uniform_int_distribution<IT> rand_scale(0, width - 1); for (IT j = rowptr[i]; j < rowptr[i + 1]; ++j) { IT target = rand_scale(mt); IT tmpId = colids[offset + target]; NT tmpVal = values[offset + target]; colids[offset + target] = colids[j]; values[offset + target] = values[j]; colids[j] = tmpId; values[j] = tmpVal; } } } template <class IT, class NT> void CSR<IT,NT>::sortIds() { #pragma omp parallel for for (IT i = 0; i < rows; ++i) { vector< pair<IT,NT> > tosort; for (IT j = rowptr[i]; j < rowptr[i+1]; ++j) { tosort.push_back(make_pair(colids[j], values[j])); } std::sort(tosort.begin(), tosort.end()); auto begitr = tosort.begin(); for (IT j = rowptr[i]; j < rowptr[i+1]; ++j) { colids[j] = begitr->first; values[j] = begitr->second; ++begitr; } } } // A and B has to have sorted column ids // Output will naturally have sorted ids template <typename IT, typename NT, typename AddOperation> CSR<IT,NT> Intersect(const CSR<IT,NT> & A, const 
CSR<IT,NT> & B, AddOperation addop) { CSR<IT,NT> C; if (A.rows != B.rows || A.cols != B.cols) { std::cout << "Can not intersect due to dimension mismatch... " << A.rows << ":" << B.rows << ", " << A.cols << ":" << B.cols << std::endl; return C; } C.rows = A.rows; C.cols = A.cols; C.zerobased = A.zerobased; C.rowptr = my_malloc<IT>(C.rows + 1); IT * row_nz = my_malloc<IT>(C.rows); vector<vector<IT>> vec_colids(C.rows); vector<vector<NT>> vec_values(C.rows); #pragma omp parallel for for(size_t i=0; i< A.rows; ++i) { IT acur = A.rowptr[i]; IT aend = A.rowptr[i+1]; IT bcur = B.rowptr[i]; IT bend = B.rowptr[i+1]; while(acur != aend && bcur != bend) { if(A.colids[acur] < B.colids[bcur]) ++acur; else if(A.colids[acur] > B.colids[bcur]) ++bcur; else // they are equal { vec_colids[i].push_back(A.colids[acur]); vec_values[i].push_back(addop(A.values[acur], B.values[bcur])); ++acur; ++bcur; } } row_nz[i] = vec_colids[i].size(); } scan(row_nz, C.rowptr, C.rows + 1); my_free<IT>(row_nz); C.nnz = C.rowptr[C.rows]; C.colids = my_malloc<IT>(C.nnz); C.values = my_malloc<NT>(C.nnz); #pragma omp parallel for for(size_t i=0; i< C.rows; ++i) { std::copy(vec_colids[i].begin(), vec_colids[i].end(), C.colids + C.rowptr[i]); std::copy(vec_values[i].begin(), vec_values[i].end(), C.values + C.rowptr[i]); } return C; } template <typename IT, typename NT> void CSR<IT, NT>::get_grb_mat ( GrB_Matrix *A ) { GrB_Index *rinds = new GrB_Index[this->nnz]; GrB_Index *cinds = new GrB_Index[this->nnz]; GrB_Index i = 0; int decr = 1 - this->zerobased; for (IT r = 0; r < this->rows; ++r) { for (IT cidx = this->rowptr[r]; cidx < this->rowptr[r+1]; ++cidx) { rinds[i] = static_cast<GrB_Index>(r-decr); cinds[i++] = static_cast<GrB_Index>(this->colids[cidx]-decr); } } if (A != NULL) { GrB_Matrix_clear(*A); *A = NULL; } GrbMatrixBuild<NT>()(A, rinds, cinds, this->values, this->rows, this->cols, this->nnz); GrB_Index nr, nc, nv; GrB_Matrix_nrows(&nr, *A); GrB_Matrix_ncols(&nc, *A); GrB_Matrix_nvals(&nv, *A); // 
cout << "GrB Matrix: " << nr << " " << nc << " " << nv << endl; delete [] rinds; delete [] cinds; return; } template <typename IT, typename NT> void CSR<IT, NT>::get_grb_mat ( GrB_Matrix A ) { assert(A != NULL && "GraphBLAS matrix to be packed is NULL!"); GrB_Index nr, nc; GrB_Matrix_nrows(&nr, A); GrB_Matrix_ncols(&nc, A); assert(nr == this->rows && nc == this->cols && "Dimension mismatch in converting CSR matrix to GraphBLAS matrix."); bool is_iso = false, is_jumbled = false; GrB_Index ap_size = sizeof(IT) * (this->rows+1), aj_size = sizeof(IT) * this->nnz, ax_size = sizeof(NT) * this->nnz; GrB_Descriptor desc = NULL; GrB_Descriptor_new(&desc); GxB_Matrix_pack_CSR(A, &this->rowptr, &this->colids, (void **)&this->values, ap_size, aj_size, ax_size, is_iso, is_jumbled, desc); assert(this->rowptr == NULL && this->colids == NULL && this->values == NULL); GrB_Descriptor_free(&desc); return; } template <typename IT, typename NT> void CSR<IT, NT>::get_grb_mat_ptr ( GrB_Matrix *A ) { static_assert(std::is_same<IT, GrB_Index>::value, "CSR matrix index type and GrB_Matrix index type " "must be the same"); GrbAlgObj<NT> to_grb; GrB_Descriptor desc = NULL; // make sure CSR object is sorted GxB_Matrix_import_CSR(A, to_grb.get_type(), this->rows, this->cols, &this->rowptr, &this->colids, (void **)&this->values, sizeof(IT)*(this->rows+1), sizeof(NT)*this->nnz, sizeof(NT)*this->nnz, false, false, desc); return; } #endif
offloading_success.c
// RUN: %libomptarget-compile-run-and-check-generic #include <stdio.h> #include <omp.h> int main(void) { int isHost = -1; #pragma omp target map(from: isHost) { isHost = omp_is_initial_device(); } if (isHost < 0) { printf("Runtime error, isHost=%d\n", isHost); } // CHECK: Target region executed on the device printf("Target region executed on the %s\n", isHost ? "host" : "device"); return isHost; }
pr32362-3.c
/* PR middle-end/32362 */ /* { dg-do run } */ /* { dg-options "-O2" } */ #include <omp.h> #include <stdlib.h> int a = 2; int main () { int n[4] = { -1, -1, -1, -1 }; int b = 4; omp_set_num_threads (4); omp_set_dynamic (0); omp_set_nested (1); #pragma omp parallel private(b) { b = omp_get_thread_num (); #pragma omp parallel firstprivate(a) { a = (omp_get_thread_num () + a) + 1; if (b == omp_get_thread_num ()) n[omp_get_thread_num ()] = a + (b << 4); } } if (n[0] != 3) abort (); if (n[3] != -1 && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36)) abort (); return 0; }
fib.c
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                       */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/

#include "bots.h"
#include "fib.h"

/* Precomputed Fibonacci numbers; fib_verify_value() uses this table for
   n < FIB_RESULTS_PRE and recurses (on the table) beyond it. */
#define FIB_RESULTS_PRE 41
long long fib_results[FIB_RESULTS_PRE] = {0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946,17711,28657,46368,75025,121393,196418,317811,514229,832040,1346269,2178309,3524578,5702887,9227465,14930352,24157817,39088169,63245986,102334155};

/* Sequential reference implementation (also used below the cutoff). */
long long fib_seq (int n)
{
	/* BUG FIX: x and y were declared int, overflowing for n >= 47 even
	   though the function returns long long; the task-parallel
	   versions below already use long long. */
	long long x, y;
	if (n < 2) return n;

	x = fib_seq(n - 1);
	y = fib_seq(n - 2);

	return x + y;
}

#if defined(IF_CUTOFF)
/* Task-parallel version: tasks are only deferred while the recursion
   depth d is below the cutoff (if() clause). */
long long fib (int n,int d)
{
	long long x, y;
	if (n < 2) return n;

	#pragma omp task shared(x) firstprivate(n) if(d < bots_cutoff_value)
	x = fib(n - 1,d+1);
	#pragma omp task shared(y) firstprivate(n) if(d < bots_cutoff_value)
	y = fib(n - 2,d+1);

	#pragma omp taskwait
	return x + y;
}
#elif defined(FINAL_CUTOFF)
/* Task-parallel version: tasks become final (and mergeable) once the
   next depth reaches the cutoff. */
long long fib (int n,int d)
{
	long long x, y;
	if (n < 2) return n;

	#pragma omp task shared(x) firstprivate(n) final(d+1 >= bots_cutoff_value) mergeable
	x = fib(n - 1,d+1);
	#pragma omp task shared(y) firstprivate(n) final(d+1 >= bots_cutoff_value) mergeable
	y = fib(n - 2,d+1);

	#pragma omp taskwait
	return x + y;
}
#elif defined(MANUAL_CUTOFF)
/* Task-parallel version: below the cutoff spawn tasks, beyond it fall
   back to the sequential recursion. */
long long fib (int n, int d)
{
	long long x, y;
	if (n < 2) return n;

	if ( d < bots_cutoff_value ) {
		#pragma omp task shared(x) firstprivate(n)
		x = fib(n - 1,d+1);

		#pragma omp task shared(y) firstprivate(n)
		y = fib(n - 2,d+1);

		#pragma omp taskwait
	} else {
		x = fib_seq(n-1);
		y = fib_seq(n-2);
	}

	return x + y;
}
#else
/* Task-parallel version without any cutoff. */
long long fib (int n)
{
	long long x, y;
	if (n < 2) return n;

	#pragma omp task shared(x) firstprivate(n)
	x = fib(n - 1);
	#pragma omp task shared(y) firstprivate(n)
	y = fib(n - 2);

	#pragma omp taskwait
	return x + y;
}
#endif

static long long par_res, seq_res;

/* Parallel driver: a single master thread creates the root task tree. */
void fib0 (int n)
{
	#pragma omp parallel
	#pragma omp master
	{
#if defined(MANUAL_CUTOFF) || defined(IF_CUTOFF) || defined(FINAL_CUTOFF)
		par_res = fib(n,0);
#else
		par_res = fib(n);
#endif
		#pragma omp taskwait
	}
	bots_message("Fibonacci result for %d is %lld\n",n,par_res);
}

/* Sequential driver (used when bots runs the sequential reference). */
void fib0_seq (int n)
{
	seq_res = fib_seq(n);
	bots_message("Fibonacci result for %d is %lld\n",n,seq_res);
}

/* Known-good value: table lookup for small n, recursion on it above. */
long long fib_verify_value(int n)
{
	if (n < FIB_RESULTS_PRE) return fib_results[n];
	return ( fib_verify_value(n-1) + fib_verify_value(n-2));
}

/* Compare the parallel result against the sequential run (if any) or
   against the precomputed values. */
int fib_verify (int n)
{
	int result;

	if (bots_sequential_flag)
	{
		if (par_res == seq_res) result = BOTS_RESULT_SUCCESSFUL;
		else result = BOTS_RESULT_UNSUCCESSFUL;
	}
	else
	{
		seq_res = fib_verify_value(n);
		if (par_res == seq_res) result = BOTS_RESULT_SUCCESSFUL;
		else result = BOTS_RESULT_UNSUCCESSFUL;
	}

	return result;
}
/* ======================== divsufsort.c (libdivsufsort-lite) ======================== */
/* * divsufsort.c for libdivsufsort-lite * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <assert.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP # include <omp.h> #endif #include "divsufsort.h" /*- Constants -*/ #define INLINE __inline #if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1) # undef ALPHABET_SIZE #endif #if !defined(ALPHABET_SIZE) # define ALPHABET_SIZE (256) #endif #define BUCKET_A_SIZE (ALPHABET_SIZE) #define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) #if defined(SS_INSERTIONSORT_THRESHOLD) # if SS_INSERTIONSORT_THRESHOLD < 1 # undef SS_INSERTIONSORT_THRESHOLD # define SS_INSERTIONSORT_THRESHOLD (1) # endif #else # define SS_INSERTIONSORT_THRESHOLD (8) #endif #if defined(SS_BLOCKSIZE) # if SS_BLOCKSIZE < 0 # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (0) # elif 32768 <= SS_BLOCKSIZE # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (32767) # endif #else # define SS_BLOCKSIZE (1024) #endif /* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */ #if SS_BLOCKSIZE == 0 # define SS_MISORT_STACKSIZE (96) #elif SS_BLOCKSIZE <= 4096 # define SS_MISORT_STACKSIZE (16) #else # define SS_MISORT_STACKSIZE (24) #endif #define SS_SMERGE_STACKSIZE (32) #define TR_INSERTIONSORT_THRESHOLD (8) #define TR_STACKSIZE (64) /*- Macros -*/ #ifndef SWAP # define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) #endif /* SWAP */ #ifndef MIN # define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif /* MIN */ #ifndef MAX # define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b)) #endif /* MAX */ #define STACK_PUSH(_a, _b, _c, _d)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize++].d = (_d);\ } while(0) #define STACK_PUSH5(_a, _b, _c, _d, _e)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\ } while(0) #define STACK_POP(_a, _b, _c, _d)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d;\ } while(0) #define STACK_POP5(_a, _b, _c, _d, _e)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\ } while(0) #define BUCKET_A(_c0) bucket_A[(_c0)] #if ALPHABET_SIZE == 256 #define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) #else #define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)]) #endif /*- Private Functions -*/ static const int lg_table[256]= { -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 }; #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE int ss_ilg(int n) { #if SS_BLOCKSIZE == 0 return (n & 0xffff0000) ? ((n & 0xff000000) ? 
24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); #elif SS_BLOCKSIZE < 256 return lg_table[n]; #else return (n & 0xff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]; #endif } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ #if SS_BLOCKSIZE != 0 static const int sqq_table[256] = { 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168, 169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180, 181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191, 192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201, 202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, 212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, 221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230, 230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238, 239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247, 247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255 }; static INLINE int ss_isqrt(int x) { int y, e; if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } e = (x & 0xffff0000) ? ((x & 0xff000000) ? 24 + lg_table[(x >> 24) & 0xff] : 16 + lg_table[(x >> 16) & 0xff]) : ((x & 0x0000ff00) ? 
8 + lg_table[(x >> 8) & 0xff] : 0 + lg_table[(x >> 0) & 0xff]); if(e >= 16) { y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7); if(e >= 24) { y = (y + 1 + x / y) >> 1; } y = (y + 1 + x / y) >> 1; } else if(e >= 8) { y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1; } else { return sqq_table[x] >> 4; } return (x < (y * y)) ? y - 1 : y; } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* Compares two suffixes. */ static INLINE int ss_compare(const unsigned char *T, const int *p1, const int *p2, int depth) { const unsigned char *U1, *U2, *U1n, *U2n; for(U1 = T + depth + *p1, U2 = T + depth + *p2, U1n = T + *(p1 + 1) + 2, U2n = T + *(p2 + 1) + 2; (U1 < U1n) && (U2 < U2n) && (*U1 == *U2); ++U1, ++U2) { } return U1 < U1n ? (U2 < U2n ? *U1 - *U2 : 1) : (U2 < U2n ? -1 : 0); } /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) /* Insertionsort for small size groups */ static void ss_insertionsort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { int *i, *j; int t; int r; for(i = last - 2; first <= i; --i) { for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) { do { *(j - 1) = *j; } while((++j < last) && (*j < 0)); if(last <= j) { break; } } if(r == 0) { *j = ~*j; } *(j - 1) = t; } } #endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */ /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE void ss_fixdown(const unsigned char *Td, const int *PA, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = Td[PA[SA[k = j++]]]; if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. 
*/ static void ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; ss_fixdown(Td, PA, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. */ static INLINE int * ss_median3(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3) { int *t; if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); } if(Td[PA[*v2]] > Td[PA[*v3]]) { if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * ss_median5(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); } if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); } if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); } if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); } if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); } if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; } return v3; } /* Returns the pivot element. 
*/ static INLINE int * ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return ss_median3(Td, PA, first, middle, last - 1); } else { t >>= 2; return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = ss_median3(Td, PA, first, first + t, first + (t << 1)); middle = ss_median3(Td, PA, middle - t, middle, middle + t); last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1); return ss_median3(Td, PA, first, middle, last); } /*---------------------------------------------------------------------------*/ /* Binary partition for substrings. */ static INLINE int * ss_partition(const int *PA, int *first, int *last, int depth) { int *a, *b; int t; for(a = first - 1, b = last;;) { for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; } for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { } if(b <= a) { break; } t = ~*b; *b = *a; *a = t; } if(first < a) { *first = ~*first; } return a; } /* Multikey introsort for medium size groups. 
*/ static void ss_mintrosort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { #define STACK_SIZE SS_MISORT_STACKSIZE struct { int *a, *b, c; int d; } stack[STACK_SIZE]; const unsigned char *Td; int *a, *b, *c, *d, *e, *f; int s, t; int ssize; int limit; int v, x = 0; for(ssize = 0, limit = ss_ilg(last - first);;) { if((last - first) <= SS_INSERTIONSORT_THRESHOLD) { #if 1 < SS_INSERTIONSORT_THRESHOLD if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); } #endif STACK_POP(first, last, depth, limit); continue; } Td = T + depth; if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); } if(limit < 0) { for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) { if((x = Td[PA[*a]]) != v) { if(1 < (a - first)) { break; } v = x; first = a; } } if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, a, depth); } if((a - first) <= (last - a)) { if(1 < (a - first)) { STACK_PUSH(a, last, depth, -1); last = a, depth += 1, limit = ss_ilg(a - first); } else { first = a, limit = -1; } } else { if(1 < (last - a)) { STACK_PUSH(first, a, depth + 1, ss_ilg(a - first)); first = a, limit = -1; } else { last = a, depth += 1, limit = ss_ilg(a - first); } } continue; } /* choose pivot */ a = ss_pivot(Td, PA, first, last); v = Td[PA[*a]]; SWAP(*first, *a); /* partition */ for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, 
*f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } a = first + (b - a), c = last - (d - c); b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth); if((a - first) <= (last - c)) { if((last - c) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(c, last, depth, limit); last = a; } else if((a - first) <= (c - b)) { STACK_PUSH(c, last, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); last = a; } else { STACK_PUSH(c, last, depth, limit); STACK_PUSH(first, a, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } else { if((a - first) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(first, a, depth, limit); first = c; } else if((last - c) <= (c - b)) { STACK_PUSH(first, a, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); first = c; } else { STACK_PUSH(first, a, depth, limit); STACK_PUSH(c, last, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } } else { limit += 1; if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, last, depth); limit = ss_ilg(last - first); } depth += 1; } } #undef STACK_SIZE } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ /*---------------------------------------------------------------------------*/ #if SS_BLOCKSIZE != 0 static INLINE void ss_blockswap(int *a, int *b, int n) { int t; for(; 0 < n; --n, ++a, ++b) { t = *a, *a = *b, *b = t; } } static INLINE void ss_rotate(int *first, int *middle, int *last) { int *a, *b, t; int l, r; l = middle - first, r = last - middle; for(; (0 < l) && (0 < r);) { if(l == r) { ss_blockswap(first, middle, l); break; } if(l < r) { a = last - 1, b = middle - 1; t = *a; do { *a-- = *b, *b-- = *a; if(b < first) { *a = t; last = a; if((r -= l + 1) <= l) { break; } a -= 1, b = middle - 1; t = *a; } } while(1); } else { a = first, b = middle; t = *a; do { *a++ = *b, *b++ = *a; if(last <= b) { *a = t; 
first = a + 1; if((l -= r + 1) <= r) { break; } a += 1, b = middle; t = *a; } } while(1); } } } /*---------------------------------------------------------------------------*/ static void ss_inplacemerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int depth) { const int *p; int *a, *b; int len, half; int q, r; int x; for(;;) { if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } else { x = 0; p = PA + *(last - 1); } for(a = first, len = middle - first, half = len >> 1, r = -1; 0 < len; len = half, half >>= 1) { b = a + half; q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth); if(q < 0) { a = b + 1; half -= (len & 1) ^ 1; } else { r = q; } } if(a < middle) { if(r == 0) { *a = ~*a; } ss_rotate(a, middle, last); last -= middle - a; middle = a; if(first == middle) { break; } } --last; if(x != 0) { while(*--last < 0) { } } if(middle == last) { break; } } } /*---------------------------------------------------------------------------*/ /* Merge-forward with internal buffer. */ static void ss_mergeforward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { int *a, *b, *c, *bufend; int t; int r; bufend = buf + (middle - first) - 1; ss_blockswap(buf, first, middle - first); for(t = *(a = first), b = buf, c = middle;;) { r = ss_compare(T, PA + *b, PA + *c, depth); if(r < 0) { do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); } else if(r > 0) { do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } else { *c = ~*c; do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } } } /* Merge-backward with internal buffer. 
*/ static void ss_mergebackward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { const int *p1, *p2; int *a, *b, *c, *bufend; int t; int r; int x; bufend = buf + (last - middle) - 1; ss_blockswap(buf, middle, last - middle); x = 0; if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } else { p1 = PA + *bufend; } if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; } else { p2 = PA + *(middle - 1); } for(t = *(a = last - 1), b = bufend, c = middle - 1;;) { r = ss_compare(T, p1, p2, depth); if(0 < r) { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = *b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } } else if(r < 0) { if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } else { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = ~*b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } } } /* D&C based merge. */ static void ss_swapmerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int bufsize, int depth) { #define STACK_SIZE SS_SMERGE_STACKSIZE #define GETIDX(a) ((0 <= (a)) ? 
(a) : (~(a))) #define MERGE_CHECK(a, b, c)\ do {\ if(((c) & 1) ||\ (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\ *(a) = ~*(a);\ }\ if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\ *(b) = ~*(b);\ }\ } while(0) struct { int *a, *b, *c; int d; } stack[STACK_SIZE]; int *l, *r, *lm, *rm; int m, len, half; int ssize; int check, next; for(check = 0, ssize = 0;;) { if((last - middle) <= bufsize) { if((first < middle) && (middle < last)) { ss_mergebackward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } if((middle - first) <= bufsize) { if(first < middle) { ss_mergeforward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; 0 < len; len = half, half >>= 1) { if(ss_compare(T, PA + GETIDX(*(middle + m + half)), PA + GETIDX(*(middle - m - half - 1)), depth) < 0) { m += half + 1; half -= (len & 1) ^ 1; } } if(0 < m) { lm = middle - m, rm = middle + m; ss_blockswap(lm, middle, m); l = r = middle, next = 0; if(rm < last) { if(*rm < 0) { *rm = ~*rm; if(first < lm) { for(; *--l < 0;) { } next |= 4; } next |= 1; } else if(first < lm) { for(; *r < 0; ++r) { } next |= 2; } } if((l - first) <= (last - r)) { STACK_PUSH(r, rm, last, (next & 3) | (check & 4)); middle = lm, last = l, check = (check & 3) | (next & 4); } else { if((next & 2) && (r == middle)) { next ^= 6; } STACK_PUSH(first, lm, l, (check & 3) | (next & 4)); first = r, middle = rm, check = (next & 3) | (check & 4); } } else { if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) { *middle = ~*middle; } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); } } #undef STACK_SIZE } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* 
Substring sort */ static void sssort(const unsigned char *T, const int *PA, int *first, int *last, int *buf, int bufsize, int depth, int n, int lastsuffix) { int *a; #if SS_BLOCKSIZE != 0 int *b, *middle, *curbuf; int j, k, curbufsize, limit; #endif int i; if(lastsuffix != 0) { ++first; } #if SS_BLOCKSIZE == 0 ss_mintrosort(T, PA, first, last, depth); #else if((bufsize < SS_BLOCKSIZE) && (bufsize < (last - first)) && (bufsize < (limit = ss_isqrt(last - first)))) { if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } buf = middle = last - limit, bufsize = limit; } else { middle = last, limit = 0; } for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); #endif curbufsize = last - (a + SS_BLOCKSIZE); curbuf = a + SS_BLOCKSIZE; if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) { ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth); } } #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, middle, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, middle, depth); #endif for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) { if(i & 1) { ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth); a -= k; } } if(limit != 0) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, middle, last, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, middle, last, depth); #endif ss_inplacemerge(T, PA, first, middle, last, depth); } #endif if(lastsuffix != 0) { /* Insert last type B* suffix. 
*/ int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2; for(a = first, i = *(first - 1); (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth))); ++a) { *(a - 1) = *a; } *(a - 1) = i; } } /*---------------------------------------------------------------------------*/ static INLINE int tr_ilg(int n) { return (n & 0xffff0000) ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); } /*---------------------------------------------------------------------------*/ /* Simple insertionsort for small size groups. */ static void tr_insertionsort(const int *ISAd, int *first, int *last) { int *a, *b; int t, r; for(a = first + 1; a < last; ++a) { for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) { do { *(b + 1) = *b; } while((first <= --b) && (*b < 0)); if(b < first) { break; } } if(r == 0) { *b = ~*b; } *(b + 1) = t; } } /*---------------------------------------------------------------------------*/ static INLINE void tr_fixdown(const int *ISAd, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = ISAd[SA[k = j++]]; if(d < (e = ISAd[SA[j]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. */ static void tr_heapsort(const int *ISAd, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; tr_fixdown(ISAd, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. 
*/ static INLINE int * tr_median3(const int *ISAd, int *v1, int *v2, int *v3) { int *t; if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); } if(ISAd[*v2] > ISAd[*v3]) { if(ISAd[*v1] > ISAd[*v3]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * tr_median5(const int *ISAd, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); } if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); } if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); } if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); } if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); } if(ISAd[*v3] > ISAd[*v4]) { return v4; } return v3; } /* Returns the pivot element. */ static INLINE int * tr_pivot(const int *ISAd, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return tr_median3(ISAd, first, middle, last - 1); } else { t >>= 2; return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = tr_median3(ISAd, first, first + t, first + (t << 1)); middle = tr_median3(ISAd, middle - t, middle, middle + t); last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1); return tr_median3(ISAd, first, middle, last); } /*---------------------------------------------------------------------------*/ typedef struct _trbudget_t trbudget_t; struct _trbudget_t { int chance; int remain; int incval; int count; }; static INLINE void trbudget_init(trbudget_t *budget, int chance, int incval) { budget->chance = chance; budget->remain = budget->incval = incval; } static INLINE int trbudget_check(trbudget_t *budget, int size) { if(size <= budget->remain) { budget->remain -= size; return 1; } if(budget->chance == 0) { budget->count += size; return 0; } budget->remain += budget->incval - size; budget->chance -= 1; return 1; } /*---------------------------------------------------------------------------*/ static INLINE void tr_partition(const int 
*ISAd, int *first, int *middle, int *last, int **pa, int **pb, int v) { int *a, *b, *c, *d, *e, *f; int t, s; int x = 0; for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } first += (b - a), last -= (d - c); } *pa = first, *pb = last; } static void tr_copy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { /* sort suffixes of middle partition by using sorted order of suffixes of left and right partition. 
*/ int *c, *d, *e; int s, v; v = b - SA - 1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; ISA[s] = d - SA; } } for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; ISA[s] = d - SA; } } } static void tr_partialcopy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { int *c, *d, *e; int s, v; int rank, lastrank, newrank = -1; v = b - SA - 1; lastrank = -1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } lastrank = -1; for(e = d; first <= e; --e) { rank = ISA[*e]; if(lastrank != rank) { lastrank = rank; newrank = e - SA; } if(newrank != rank) { ISA[*e] = newrank; } } lastrank = -1; for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } } static void tr_introsort(int *ISA, const int *ISAd, int *SA, int *first, int *last, trbudget_t *budget) { #define STACK_SIZE TR_STACKSIZE struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE]; int *a, *b, *c; int t; int v, x = 0; int incr = ISAd - ISA; int limit, next; int ssize, trlink = -1; for(ssize = 0, limit = tr_ilg(last - first);;) { if(limit < 0) { if(limit == -1) { /* tandem repeat partition */ tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); /* update ranks */ if(a < last) { for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if(1 < (b - a)) { STACK_PUSH5(NULL, a, b, 0, 0); STACK_PUSH5(ISAd - incr, first, last, -2, trlink); trlink = ssize - 2; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); last = a, limit 
= tr_ilg(a - first); } else if(1 < (last - b)) { first = b, limit = tr_ilg(last - b); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); first = b, limit = tr_ilg(last - b); } else if(1 < (a - first)) { last = a, limit = tr_ilg(a - first); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else if(limit == -2) { /* tandem repeat copy */ a = stack[--ssize].b, b = stack[ssize].c; if(stack[ssize].d == 0) { tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); } else { if(0 <= trlink) { stack[trlink].d = -1; } tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); } STACK_POP5(ISAd, first, last, limit, trlink); } else { /* sorted partition */ if(0 <= *first) { a = first; do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); first = a; } if(first < last) { a = first; do { *a = ~*a; } while(*++a < 0); next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1; if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } /* push */ if(trbudget_check(budget, a - first)) { if((a - first) <= (last - a)) { STACK_PUSH5(ISAd, a, last, -3, trlink); ISAd += incr, last = a, limit = next; } else { if(1 < (last - a)) { STACK_PUSH5(ISAd + incr, first, a, next, trlink); first = a, limit = -3; } else { ISAd += incr, last = a, limit = next; } } } else { if(0 <= trlink) { stack[trlink].d = -1; } if(1 < (last - a)) { first = a, limit = -3; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else { STACK_POP5(ISAd, first, last, limit, trlink); } } continue; } if((last - first) <= TR_INSERTIONSORT_THRESHOLD) { tr_insertionsort(ISAd, first, last); limit = -3; continue; } if(limit-- == 0) { tr_heapsort(ISAd, first, last - first); for(a = last - 1; first < a; a = b) { for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } } limit = -3; continue; } /* choose pivot */ a = tr_pivot(ISAd, first, last); SWAP(*first, *a); v = ISAd[*first]; /* 
partition */ tr_partition(ISAd, first, first + 1, last, &a, &b, v); if((last - first) != (b - a)) { next = (ISA[*a] != v) ? tr_ilg(b - a) : -1; /* update ranks */ for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if((1 < (b - a)) && (trbudget_check(budget, b - a))) { if((a - first) <= (last - b)) { if((last - b) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((a - first) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { if((a - first) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((last - b) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } } else { if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 
< (last - b)) { first = b; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { last = a; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } } else { if(trbudget_check(budget, last - first)) { limit = tr_ilg(last - first), ISAd += incr; } else { if(0 <= trlink) { stack[trlink].d = -1; } STACK_POP5(ISAd, first, last, limit, trlink); } } } #undef STACK_SIZE } /*---------------------------------------------------------------------------*/ /* Tandem repeat sort */ static void trsort(int *ISA, int *SA, int n, int depth) { int *ISAd; int *first, *last; trbudget_t budget; int t, skip, unsorted; trbudget_init(&budget, tr_ilg(n) * 2 / 3, n); /* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */ for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) { first = SA; skip = 0; unsorted = 0; do { if((t = *first) < 0) { first -= t; skip += t; } else { if(skip != 0) { *(first + skip) = skip; skip = 0; } last = SA + ISA[t] + 1; if(1 < (last - first)) { budget.count = 0; tr_introsort(ISA, ISAd, SA, first, last, &budget); if(budget.count != 0) { unsorted += budget.count; } else { skip = first - last; } } else if((last - first) == 1) { skip = -1; } first = last; } } while(first < (SA + n)); if(skip != 0) { *(first + skip) = skip; } if(unsorted == 0) { break; } } } /*---------------------------------------------------------------------------*/ /* Sorts suffixes of type B*. */ static int sort_typeBstar(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n) { int *PAb, *ISAb, *buf; #ifdef _OPENMP int *curbuf; int l; #endif int i, j, k, t, m, bufsize; int c0, c1; #ifdef _OPENMP int d0, d1; int tmp; #endif /* Initialize bucket arrays. */ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } /* Count the number of occurrences of the first one or two characters of each type A, B and B* suffix. 
Moreover, store the beginning position of all type B* suffixes into the array SA. */ i = n - 1; m = n; c0 = T[n - 1]; while ( 0 <= i) { /* type A suffix. */ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); if(0 <= i) { /* type B* suffix. */ ++BUCKET_BSTAR(c0, c1); SA[--m] = i; /* type B suffix. */ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { ++BUCKET_B(c0, c1); } } } m = n - m; /* note: A type B* suffix is lexicographically smaller than a type B suffix that begins with the same first two characters. */ /* Calculate the index of start/end point of each bucket. */ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { t = i + BUCKET_A(c0); BUCKET_A(c0) = i + j; /* start point */ i = t + BUCKET_B(c0, c0); for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { j += BUCKET_BSTAR(c0, c1); BUCKET_BSTAR(c0, c1) = j; /* end point */ i += BUCKET_B(c0, c1); } } if(0 < m) { /* Sort the type B* suffixes by their first two characters. */ PAb = SA + n - m; ISAb = SA + m; for(i = m - 2; 0 <= i; --i) { t = PAb[i], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = i; } t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = m - 1; /* Sort the type B* substrings using sssort. 
*/ #ifdef _OPENMP tmp = omp_get_max_threads(); buf = SA + m, bufsize = (n - (2 * m)) / tmp; c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; #pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp) { tmp = omp_get_thread_num(); curbuf = buf + tmp * bufsize; k = 0; for(;;) { #pragma omp critical(sssort_lock) { if(0 < (l = j)) { d0 = c0, d1 = c1; do { k = BUCKET_BSTAR(d0, d1); if(--d1 <= d0) { d1 = ALPHABET_SIZE - 1; if(--d0 < 0) { break; } } } while(((l - k) <= 1) && (0 < (l = k))); c0 = d0, c1 = d1, j = k; } } if(l == 0) { break; } sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); } } #else buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } #endif /* Compute ranks of type B* substrings. */ for(i = m - 1; 0 <= i; --i) { if(0 <= SA[i]) { j = i; do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); SA[i + 1] = i - j; if(i <= 0) { break; } } j = i; do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); ISAb[SA[i]] = j; } /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); /* Set the sorted order of tyoe B* suffixes. */ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { t = i; for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; } } /* Calculate the index of start/end point of each bucket. */ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { i = BUCKET_A(c0 + 1) - 1; for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { t = i - BUCKET_B(c0, c1); BUCKET_B(c0, c1) = i; /* end point */ /* Move all type B* suffixes to the correct position. 
*/ for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; } } BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ BUCKET_B(c0, c0) = i; /* end point */ } } return m; } /* Constructs the suffix array by using the sorted order of type B* suffixes. */ static void construct_SA(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); *j = ~s; c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); *j = ~s; } } } } /* Construct the suffix array by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static int construct_BWT(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k, *orig; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. 
*/ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /*---------------------------------------------------------------------------*/ /*- Function -*/ int divsufsort(const unsigned char *T, int *SA, int n) { int *bucket_A, *bucket_B; int m; int err = 0; /* Check arguments. */ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } else if(n == 0) { return 0; } else if(n == 1) { SA[0] = 0; return 0; } else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Suffixsort. */ if((bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, SA, bucket_A, bucket_B, n); construct_SA(T, SA, bucket_A, bucket_B, n, m); } else { err = -2; } free(bucket_B); free(bucket_A); return err; } /* Constructs the burrows-wheeler transformed string of a given string. 
* @param T[0..n-1] The input string. * @param U[0..n-1] The output string. (can be T) * @param A[0..n-1] The temporary array. (can be NULL) * @param n The length of the given string. * @return The primary index if no error occurred, -1 or -2 otherwise. */ int divbwt(const unsigned char *T, unsigned char *U, int *A, int n) { int *B; int *bucket_A, *bucket_B; int m, pidx, i; /* Check arguments. */ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Burrows-Wheeler Transform. */ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, B, bucket_A, bucket_B, n); pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); /* Copy to output string. */ U[0] = T[n - 1]; for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; } for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; } pidx += 1; } else { pidx = -2; } free(bucket_B); free(bucket_A); if(A == NULL) { free(B); } return pidx; }
mafillvmain.c
/* CalculiX - A 3-dimensional finite element program */
/* Copyright (C) 1998-2015 Guido Dhondt */

/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation(version 2); */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */

/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <pthread.h>
#include "CalculiX.h"

/* File-scope copies of mafillvmain's arguments: the thread routine
   mafillvmt() receives only its thread index through pthread_create, so
   every other argument is handed over via these static globals, which
   mafillvmain() fills in before the threads are started. */

static char *lakonf1;

static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*nzs1,*ielfa1,
    *ifabou1,*nbody1,*neq1,*nactdohinv1;

static double *auv1=NULL,*adv1=NULL,*bv1=NULL,*vfa1,*xxn1,*area1,*vel1,
    *cosa1,*umfa1,*xlet1,*xle1,*gradvfa1,*xxi1,*body1,*volume1,*dtimef1,
    *velo1,*veloo1,*sel1,*xrlfa1,*gamma1,*xxj1,*a11,*a21,*a31,*flux1;

/* Multithreaded driver for the Fortran routine mafillv (CFD part of
   CalculiX; presumably the assembly of the velocity/momentum equations —
   confirm against mafillv.f). Each of num_cpus POSIX threads assembles a
   private copy of the lhs arrays (adv: *neq entries, auv: 2*(*nzs) entries)
   and the rhs (bv: 3*(*neq) entries) for its slice of the *nef elements;
   the private copies are then summed into the caller's adv/auv/bv with an
   OpenMP reduction loop. The thread count is taken from, in order of
   precedence: CCX_NPROC_CFD, OMP_NUM_THREADS, then the detected CPU count,
   clamped to NUMBER_OF_CPUS (if set) and to the number of elements. */

void mafillvmain(ITG *nef,ITG *ipnei,ITG *neifa,ITG *neiel,
       double *vfa,double *xxn,double *area,double *auv,double *adv,
       ITG *jq,ITG *irow,ITG *nzs,double *bv,double *vel,double *cosa,
       double *umfa,double *xlet,double *xle,double *gradvfa,
       double *xxi,double *body,double *volume,
       ITG *ielfa,char *lakonf,ITG *ifabou,ITG *nbody,ITG *neq,
       double *dtimef,double *velo,double *veloo,
       double *sel,double *xrlfa,double *gamma,double *xxj,
       ITG *nactdohinv,double *a1,double *a2,double *a3,double *flux){

    ITG i,j;

    /* variables for multithreading procedure */

    ITG sys_cpus,*ithread=NULL;
    char *env,*envloc,*envsys;

    num_cpus = 0;
    sys_cpus=0;

    /* explicit user declaration prevails */

    envsys=getenv("NUMBER_OF_CPUS");
    if(envsys){
	sys_cpus=atoi(envsys);
	if(sys_cpus<0) sys_cpus=0;
    }

    /* automatic detection of available number of processors */

    if(sys_cpus==0){
	sys_cpus = getSystemCPUs();
	if(sys_cpus<1) sys_cpus=1;
    }

    /* local declaration prevails, if strictly positive */

    envloc = getenv("CCX_NPROC_CFD");
    if(envloc){
	num_cpus=atoi(envloc);
	if(num_cpus<0){
	    num_cpus=0;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    /* else global declaration, if any, applies */

    env = getenv("OMP_NUM_THREADS");
    if(num_cpus==0){
	if (env)
	    num_cpus = atoi(env);
	if (num_cpus < 1) {
	    num_cpus=1;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    /* never use more threads than elements;
       next line is to be inserted in a similar way for all other parallel
       parts */

    if(*nef<num_cpus) num_cpus=*nef;

    pthread_t tid[num_cpus];

    /* allocating fields for lhs and rhs matrix:
       one private, zero-initialized copy per thread (NNEW calloc's) */

    NNEW(adv1,double,num_cpus**neq);
    NNEW(auv1,double,(long long)num_cpus*2**nzs);
    NNEW(bv1,double,num_cpus*3**neq);

    /* calculating the stiffness and/or mass matrix (symmetric part):
       publish all arguments in the file-scope globals read by mafillvmt;
       this must happen before any thread is created */

    nef1=nef;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;vfa1=vfa;xxn1=xxn;
    area1=area;jq1=jq;irow1=irow;nzs1=nzs;vel1=vel;cosa1=cosa;umfa1=umfa;
    xlet1=xlet;xle1=xle;gradvfa1=gradvfa;xxi1=xxi;body1=body;volume1=volume;
    ielfa1=ielfa;lakonf1=lakonf;ifabou1=ifabou;nbody1=nbody;neq1=neq;
    dtimef1=dtimef;velo1=velo;veloo1=veloo;sel1=sel;xrlfa1=xrlfa;
    gamma1=gamma;xxj1=xxj;nactdohinv1=nactdohinv;a11=a1;a21=a2;a31=a3;
    flux1=flux;

    /* create threads and wait */

    NNEW(ithread,ITG,num_cpus);
    for(i=0; i<num_cpus; i++) {
	ithread[i]=i;
	pthread_create(&tid[i], NULL, (void *)mafillvmt, (void *)&ithread[i]);
    }
    for(i=0; i<num_cpus; i++) pthread_join(tid[i], NULL);
    SFREE(ithread);

    /* copying and accumulating the stiffness and/or mass matrix:
       thread 0's copy is assigned, the other threads' copies are added */

#pragma omp parallel \
    default(none) \
    shared(neq,adv,adv1,num_cpus,nzs,auv,auv1,bv,bv1) \
    private(i,j)
    {
#pragma omp for
	for(i=0;i<*neq;i++){
	    adv[i]=adv1[i];
	    for(j=1;j<num_cpus;j++){
		adv[i]+=adv1[i+j**neq];
	    }
	}
#pragma omp for
	for(i=0;i<2**nzs;i++){
	    auv[i]=auv1[i];
	    for(j=1;j<num_cpus;j++){
		auv[i]+=auv1[i+(long long)j*2**nzs];
	    }
	}
#pragma omp for
	for(i=0;i<3**neq;i++){
	    bv[i]=bv1[i];
	    for(j=1;j<num_cpus;j++){
		bv[i]+=bv1[i+j*3**neq];
	    }
	}
    }

    SFREE(adv1);
    SFREE(auv1);
    SFREE(bv1);

    return;

}

/* subroutine for multithreading of mafillv: pthread entry point.
   *i is the zero-based thread index; the element range [nefa,nefb]
   (1-based, for Fortran) assigned to this thread is derived from it,
   and all other data comes from the file-scope globals above. */

void *mafillvmt(ITG *i){

    ITG indexadv,indexbv,nefa,nefb,nefdelta;
    long long indexauv;

    /* offsets of this thread's private copies within adv1/auv1/bv1
       (auv offset in long long: 2*num_cpus*(*nzs1) may exceed ITG range) */

    indexadv=*i**neq1;
    indexauv=(long long)*i*2**nzs1;
    indexbv=*i*3**neq1;

    /* divide the elements in blocks of nefdelta elements per thread;
       ceil -> floor */

    nefdelta=(ITG)floor(*nef1/(double)num_cpus);
    nefa=*i*nefdelta+1;
    nefb=(*i+1)*nefdelta;

    /* the last thread picks up the remainder elements;
       next line! -> all parallel sections */

    if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;

    FORTRAN(mafillv,(nef1,ipnei1,neifa1,neiel1,vfa1,xxn1,area1,
		     &auv1[indexauv],&adv1[indexadv],jq1,irow1,nzs1,&bv1[indexbv],
		     vel1,cosa1,umfa1,xlet1,xle1,gradvfa1,xxi1,
		     body1,volume1,ielfa1,lakonf1,ifabou1,nbody1,neq1,
		     dtimef1,velo1,veloo1,sel1,xrlfa1,gamma1,xxj1,nactdohinv1,a11,
		     a21,a31,flux1,&nefa,&nefb));

    return NULL;
}
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 4296.0 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *); static ssize_t TracePath(MVGInfo *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (draw_info->id != (char *) NULL) (void) CloneString(&clone_info->id,draw_info->id); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) 
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ; 
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)* sizeof(*clone_info->dash_pattern)); (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops, (size_t) number_stops*sizeof(*clone_info->gradient.stops)); } clone_info->bounds=draw_info->bounds; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->clip_path=draw_info->clip_path; clone_info->clip_units=draw_info->clip_units; if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0, MagickTrue,exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0, MagickTrue,exception); clone_info->render=draw_info->render; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info, % ExceptionInfo *excetion) % % A description of each parameter follows: % % o ConvertPathToPolygon() returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { ssize_t i; if (polygon_info->edges != (EdgeInfo *) NULL) { for (i=0; i < (ssize_t) polygon_info->number_edges; i++) if (polygon_info->edges[i].points != (PointInfo *) NULL) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory( polygon_info->edges); } return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. 
*/ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { EdgeInfo *p; ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? "transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info, ExceptionInfo *exception) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. 
*/
  /*
    Allocate the polygon container; the edge table starts at 16 slots and
    doubles on demand as edges are emitted.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  /* Reset the edge-building state before walking the path elements. */
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush any in-progress edge (needs at least 2 points),
          then begin a fresh run of points at this location.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge table (doubling). */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* store edges y-ascending */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;  /* ownership moved into the edge */
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            /* Start a new point buffer (16 slots, doubles on demand). */
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
      (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the y-direction flipped, so emit the points collected so
          far as one monotone edge and start a new edge sharing this point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;  /* new edge begins at the previous endpoint */
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point buffer (doubling). */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final pending edge; a lone point (n < 2) is discarded. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  /*
    Trim the edge table and each edge's point buffer to their final sizes,
    then sort the edges for the scanline rasterizer.
  */
  polygon_info->number_edges=edge;
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t   P r i m i t i v e   T o   P a t h                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/

/* Emit every element of a vector path to the drawing event log. */
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (duplicate suppression) */

  ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives carry no traceable outline. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  /* Worst case is 3 path elements per point plus the terminating EndCode. */
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite its
      MoveTo as OpenCode and append a ghostline back to the start point.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim the over-allocated buffer to the number of elements written. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* Release every owned member before freeing the structure itself. */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature to catch use-after-free of this structure. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip the candidate span [edge->x1,edge->x2] on destination row y to the
  region that maps back inside the source image under the (already inverted)
  affine transform.  A result with x2 < x1 signals an empty span.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative x-scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Degenerate sx: whole row maps outside; x2=x1 makes span empty. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): this degenerate branch sets x2=edge->x2 while the
            columns branch above sets x2=edge->x1 — asymmetric; confirm the
            intent is still an empty/unchanged span here.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/* Invert a 2x3 affine matrix (rotation/scale part plus translation). */
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  /* PerceptibleReciprocal() guards against a (near) zero determinant. */
  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: transform the four source corners and take the
    min/max of the results.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: iterate destination rows, map each pixel back
    through the inverse transform, and composite source-over.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;  /* a sibling thread failed; skip remaining rows */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* row maps entirely outside the source */
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /* Map the destination pixel back into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI unless the draw info carries a density string. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        resolution.x=geometry_info.rho;
      resolution.y=resolution.x;
      if ((flags & SigmaValue) != 0)
        resolution.y=geometry_info.sigma;
    }
  /* Half the scaled stroke width: padding around each rectangle. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge: red for directed edges, green otherwise. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* Loop exited early on failure. */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Finally draw the overall bounding rectangle in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* The clip path MVG is stored as an image artifact keyed by id. */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG onto a transparent canvas, then keep
    the (negated) alpha channel as the write mask.  Returns NULL on failure;
    caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the clip path with white fill / transparent stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask == (Image *) NULL)
    status=MagickFalse;
  else
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status&=NegateImage(clip_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);  /* DestroyImage() returns NULL */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: same recipe as DrawClippingMask() but produces a
    composite (read) mask instead of a write mask.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  /*
    NOTE(review): if NegateImage() fails, composite_mask was already
    destroyed above and is NULL here; this second DestroyImage() call would
    then receive NULL — confirm DestroyImage() tolerates NULL input.
  */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   D a s h
  P o l y g o n                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  double
    dx,
    dy;

  ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* index of next slot in the dash segment being assembled */
    n;  /* index into the dash pattern; odd n == gap, even n == dash */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* Scratch polygon large enough for any single dash segment. */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  /*
    Consume the dash offset: advance through the pattern until the offset
    is exhausted, leaving length/n positioned mid-pattern.
  */
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* Walk each polygon segment, slicing it into dash/gap sub-segments. */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;  /* skip pathologically long segments */
    if (fabs(length) < MagickEpsilon)
      {
        /* Current pattern entry exhausted; move to the next (wrap at 0). */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* Gap: restart the pending dash at the gap's end point. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* Dash: close the pending segment and stroke it. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the leftover of this segment into the next one. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* inside a gap: nothing to accumulate */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /* Stroke the final partial dash. */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Return the (unnormalized) gradient offset of pixel (x,y): for linear
    gradients, the projection of the pixel onto the gradient vector; for
    radial gradients, the distance from the gradient center (scaled by the
    radii unless the spread is RepeatSpread).
  */
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /*
        offset = (p.q)/|p| = scalar projection of q onto the gradient
        vector; PerceptibleReciprocal() guards a degenerate (zero-length)
        vector.
      */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /*
            RepeatSpread uses the raw distance; the caller wraps it by the
            gradient radius.
          */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate into the gradient's frame and normalize each axis by its
        radius so the result is an elliptical distance.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort() comparator ordering gradient stops by ascending offset; offsets
  within MagickEpsilon compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    The interpolation loops below binary-search-by-scan and assume the stops
    are sorted by offset.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset for this row; linear gradients normalize by the
      gradient vector length so stop offsets lie in [0,1].
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            PadSpread: clamp offsets outside [0,1] to the first/last stop
            color; otherwise blend linearly between the bracketing stops.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            ReflectSpread: mirror the offset back and forth; even periods
            keep fmod(offset,1.0), odd periods reverse it.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /*
            RepeatSpread: wrap the raw offset by the gradient period (vector
            length for linear, radius for radial); 'antialias' marks pixels
            within one unit of the seam, which blend across the full stop
            range to soften the transition.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam: blend from last stop back to first.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  char
    *text = (char *) NULL;

  double
    extent;

  size_t
    quantum;

  ssize_t
    i;

  /*
    Check if there is enough storage for drawing primitives.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*(double)
    quantum;
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  /*
    Refuse extents that cannot be represented as ssize_t (or are NaN) before
    attempting a reallocation.
  */
  if ((extent >= (double) MAGICK_SSIZE_MAX) || (IsNaN(extent) != 0))
    return(MagickFalse);
  /*
    Remember the most recent text/image primitive payload so it can be
    reattached if the reallocation fails and we unwind below.
  */
  for (i=0; i < mvg_info->offset; i++)
    if (((*mvg_info->primitive_info)[i].primitive == TextPrimitive) ||
        ((*mvg_info->primitive_info)[i].primitive == ImagePrimitive))
      if ((*mvg_info->primitive_info)[i].text != (char *) NULL)
        text=(*mvg_info->primitive_info)[i].text;
  *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
    *mvg_info->primitive_info,(size_t) (extent+1),quantum);
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    {
      *mvg_info->extent=(size_t) extent;
      /*
        Mark the newly grown tail as undefined so callers can rely on a
        terminator being present.
      */
      for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
      {
        (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
        (*mvg_info->primitive_info)[i].text=(char *) NULL;
      }
      return(MagickTrue);
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t)
    (PrimitiveExtentPad+1)*quantum);
  (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)*
    quantum));
  *mvg_info->extent=1;
  /* Reattach the saved text pointer so it is freed during unwinding. */
  (*mvg_info->primitive_info)[0].text=text;
  mvg_info->offset=0;
  return(MagickFalse);
}

/*
  Locale-aware wrapper around InterpretLocaleValue(); on return *sentinal
  points past the last character interpreted (sentinal == string when no
  conversion was performed).
*/
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  char
    **magick_restrict q;

  double
    value;

  q=sentinal;
  value=InterpretLocaleValue(string,q);
  sentinal=q;
  return(value);
}

/* Splay-tree key comparator: plain strcmp() over macro names. */
static int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              n tracks push/pop nesting; the macro body ends at the pop that
              balances this push.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  Return MagickTrue when 'point' parses as a (possibly zero) numeric value;
  a zero value only counts when at least one character was consumed.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=GetDrawValue(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
    MagickTrue);
}

/* Record a single point as a one-coordinate, open primitive. */
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->point=point;
  return(MagickTrue);
}

static MagickBooleanType RenderMVGContent(Image *image,
  const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag  "Render/Image"

  AffineMatrix
    affine,
    current;

  char
    keyword[MagickPathExtent],
    geometry[MagickPathExtent],
    *next_token,
    pattern[MagickPathExtent],
    *primitive,
    *token;

  const char
    *q;

  double
    angle,
    coordinates,
    cursor,
    factor,
    primitive_extent;

  DrawInfo
    *clone_info,
    **graphic_context;

  MagickBooleanType
    proceed;

  MagickStatusType
    status;

  MVGInfo
    mvg_info;

  PointInfo
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  const char
    *p;

  ssize_t
    i,
    x;

  SegmentInfo
    bounds;

  size_t
    extent,
    number_points,
    number_stops;

  SplayTreeInfo
    *macros;

  ssize_t
    defsDepth,
    j,
    k,
    n,
    symbolDepth;

  StopInfo
    *stops;

  TypeMetric
    metrics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (depth > MagickMaxRecursionDepth)
    ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
      image->filename);
  if ((draw_info->primitive == (char
*) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=(size_t) PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (number_points+1),sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) (number_points+1)* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || 
(image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); 
break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((graphic_context[n]->render != MagickFalse) && (mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,mvg_class, &graphic_context[n]->fill_pattern,exception); break; } (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); break; } 
status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) 
GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == 
next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo region; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); region.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) region.width,(double) region.height,(double) region.x,(double) region.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,mvg_class, &graphic_context[n]->stroke_pattern,exception); break; } (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); break; } status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; 
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) 
|| (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) 
ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case 
PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine( &graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(double) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        Release any text payloads attached to text/image primitives before
        freeing the primitive array itself.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /*
    Destroy the graphic-context stack; contexts left over from unbalanced
    push/pop pairs are released here as well.
  */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage() renders the MVG (Magick Vector Graphics) primitives described
  by draw_info onto image.  Thin public wrapper that starts the recursive
  MVG renderer at depth 0.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image; any previous image is
%      destroyed and replaced.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG path and geometry were stored as image artifacts
    ("<name>" and "<name>-geometry") when the pattern was defined; fail if
    either is missing.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Allocate a fresh canvas sized by the stored geometry, with a fully
    transparent background.
  */
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /*
    Drop inherited fill/stroke patterns so the pattern renders standalone.
  */
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Release the per-thread polygon cache.  Always returns NULL so callers can
  assign the result back to their pointer.
*/
static PolygonInfo **DestroyPolygonTLS(PolygonInfo **polygon_info)
{
  ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  Allocate one PolygonInfo slot per worker thread (ThreadResource limit) and
  populate slot 0 with the polygon converted from primitive_info; remaining
  slots are zeroed here and later filled by ClonePolygonEdgesTLS().  Returns
  NULL on failure.
*/
static PolygonInfo **AcquirePolygonTLS(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info,exception);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonTLS(polygon_info));
  polygon_info[0]=ConvertPathToPolygon(path_info,exception);
  if (polygon_info[0] == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonTLS(polygon_info));
    }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}

/*
  Deep-copy the edge list of polygon_info[0] into slots 1..number_threads-1
  so each OpenMP thread can mutate its own copy (GetFillAlpha() destroys
  edges and updates highwater marks in place).  Returns MagickFalse on any
  allocation failure; the caller is expected to destroy the whole TLS array.
*/
static MagickBooleanType ClonePolygonEdgesTLS(PolygonInfo **polygon_info,
  const size_t number_threads,ExceptionInfo *exception)
{
  ssize_t
    i;

  for (i=1; i < (ssize_t) number_threads; i++)
  {
    EdgeInfo
      *edge_info;

    ssize_t
      j;

    polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
      sizeof(*polygon_info[i]));
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(MagickFalse);
      }
    polygon_info[i]->number_edges=0;
    edge_info=polygon_info[0]->edges;
    polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
      polygon_info[0]->number_edges,sizeof(*edge_info));
    if (polygon_info[i]->edges == (EdgeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(MagickFalse);
      }
    (void) memcpy(polygon_info[i]->edges,edge_info,
      polygon_info[0]->number_edges*sizeof(*edge_info));
    /*
      Clear the copied points pointers first so a failure below leaves no
      aliased (double-free-able) pointers behind.
    */
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
      polygon_info[i]->edges[j].points=(PointInfo *) NULL;
    polygon_info[i]->number_edges=polygon_info[0]->number_edges;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
    {
      edge_info=polygon_info[0]->edges+j;
      /*
        NOTE(review): element size here is sizeof(*edge_info) (EdgeInfo),
        but the buffer holds PointInfo elements; this over-allocates --
        presumably sizeof(*edge_info->points) was intended.  Confirm before
        changing (over-allocation is benign, under-allocation would not be).
      */
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info));
      if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  return(MagickTrue);
}

/*
  Remove edge `edge' from the polygon's edge list: free its points and
  close the gap with memmove().  Returns the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  assert(edge < (ssize_t) polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < (ssize_t) polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
  Return the fill coverage (0..1) for pixel (x,y) and store the stroke
  coverage in *stroke_alpha.  `mid' is half the device-space stroke width.
  Mutates polygon_info: edges wholly above the current scanline are
  destroyed (callers sweep y in increasing order), and each edge's
  scanline/highwater cache is advanced -- hence the per-thread copies.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  const PointInfo
    *q;

  EdgeInfo
    *p;

  PointInfo
    delta;

  ssize_t
    i,
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      Edges are sorted by y; once one starts below the scanline, stop.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /*
          Edge is entirely above this scanline; it can never matter again.
        */
        p--;
        (void) DestroyEdge(polygon_info,j--);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /*
            New scanline: remember where to resume next time.
          */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);  /* segLen*point-cos(theta) */
      if (beta <= 0.0)
        {
          /*
            Cosine <= 0, point is closest to q.
          */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;  /* segLen*segLen */
          if (beta >= alpha)
            {
              /*
                Point is closest to q+1.
              */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /*
                Point is closest to point between q & q+1.
              */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.  `distance' is still squared here.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /*
        NOTE(review): this second `distance > 1.0' test is unreachable --
        the combined check above already `continue'd on it.
      */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /*
          Edge is entirely left of (x,y): it crosses the ray.
        */
        winding_number+=p->direction != 0 ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /*
      Cross product decides which side of segment q->(q+1) the pixel is on.
    */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction != 0 ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /*
        Even-odd rule: inside when the winding number is odd.
      */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  typedef struct _ExtentInfo
  {
    ssize_t
      x1,
      y1,
      x2,
      y2;
  } ExtentInfo;

  CacheView
    *image_view;

  const char
    *artifact;

  double
    mid;

  EdgeInfo
    *p;

  ExtentInfo
    poly_extent;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  SegmentInfo
    bounds;

  size_t
    number_threads;

  ssize_t
    i,
    y;

  /*
    Rasterize the polygon/line primitive: build per-thread edge lists,
    clip the bounding box to the image, then composite fill and stroke
    coverage per pixel in parallel scanline bands.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonTLS(primitive_info,exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
    MagickTrue : MagickFalse;
  /*
    `mid' is half the stroke width in device space.
  */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  /*
    Union of all edge bounding boxes, padded by the stroke half-width.
  */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonTLS(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /*
    Clamp the bounding box to the image frame.
  */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  poly_extent.x1=CastDoubleToLong(ceil(bounds.x1-0.5));
  poly_extent.y1=CastDoubleToLong(ceil(bounds.y1-0.5));
  poly_extent.x2=CastDoubleToLong(floor(bounds.x2+0.5));
  poly_extent.y2=CastDoubleToLong(floor(bounds.y2+0.5));
  number_threads=GetMagickNumberThreads(image,image,poly_extent.y2-
    poly_extent.y1+1,1);
  /*
    Each thread needs a private edge list (GetFillAlpha mutates it).
  */
  status=ClonePolygonEdgesTLS(polygon_info,number_threads,exception);
  if (status == MagickFalse)
    {
      polygon_info=DestroyPolygonTLS(polygon_info);
      return(status);
    }
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        num_threads(number_threads)
#endif
      for (y=poly_extent.y1; y <= poly_extent.y2; y++)
      {
        PixelInfo
          pixel;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        x=poly_extent.x1;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (poly_extent.x2-
          x+1),1,exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= poly_extent.x2; x++)
        {
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            {
              GetFillColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,&pixel,
                exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonTLS(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    num_threads(number_threads)
#endif
  for (y=poly_extent.y1; y <= poly_extent.y2; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,poly_extent.x1,y,(size_t)
      (poly_extent.x2-poly_extent.x1+1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=poly_extent.x1; x <= poly_extent.x2; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /*
            No antialiasing: snap coverage to 0 or 1.
          */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,&fill_color,
        exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,
        &stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,
        q,(double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonTLS(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Emit a debug-log trace of a primitive: a single line for the simple
  primitive types, or a per-coordinate dump of each subpath otherwise.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /*
          Start of a new subpath; `coordinates' counts down its points.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /*
      NOTE(review): this assignment repeats the one at the top of the loop
      body; it is redundant but harmless.
    */
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /*
      Subpath complete: "last" when it ends away from its start point,
      "open" when it returns to it.
    */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}

MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Dispatch one primitive to its renderer: alpha/color pixel methods,
    composited images, single points, annotated text, or (default) the
    polygon rasterizer with optional dash/stroke handling.
  */
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Replace the alpha of every pixel fuzzily matching the target.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /*
                Fill-to-border floods until the border color, not the seed.
              */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /*
            Restrict the flood fill to the alpha channel only.
          */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ?
            MagickFalse : MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Set the alpha of every pixel in the image.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Replace every pixel fuzzily matching the target color.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double)
                draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Paint every pixel of the image with the fill color.
          */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            MagickBooleanType
              path_status;

            struct stat
              attributes;

            /*
              Read composite image.
            */
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            (void) SetImageInfo(clone_info,1,exception);
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            path_status=GetPathAttributes(clone_info->filename,&attributes);
            if (path_status != MagickFalse)
              {
                /*
                  Refuse character devices (e.g. /dev/*) as image sources.
                */
                if (S_ISCHR(attributes.st_mode) == 0)
                  composite_images=ReadImage(clone_info,exception);
                else
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    FileOpenError,"UnableToOpenFile","`%s'",
                    clone_info->filename);
              }
            else
              /*
                No local file: allow any coder except remote URL schemes.
              */
              if ((LocaleCompare(clone_info->magick,"ftp") != 0) &&
                  (LocaleCompare(clone_info->magick,"http") != 0) &&
                  (LocaleCompare(clone_info->magick,"https") != 0))
                composite_images=ReadImage(clone_info,exception);
              else
                (void) ThrowMagickException(exception,GetMagickModule(),
                  FileOpenError,"UnableToOpenFile","`%s'",
                  clone_info->filename);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /*
        Only the first image of a sequence is composited.
      */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Over-composites go through the affine renderer; anything else uses
        the generic compositor at the gravity-resolved offset.
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /*
            Fill first with no stroke, then overlay the dashed stroke.
          */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            point_x,
            point_y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /*
                Round caps/joins (or multiple subpaths) render correctly
                through the plain polygon rasterizer.
              */
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,
              exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   S t r o k e   P o l y g o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  Render a round line cap at the primitive's point by drawing a tiny
  4-point polygon offset by 2*MagickEpsilon in x/y.  Presumably the
  degenerate quad picks up round rendering inside DrawPolygonPrimitive --
  confirm against the rasterizer before relying on this detail.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    i;

  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

/*
  Stroke each subpath by tracing its outline polygon and filling that
  outline with the stroke color (fill/stroke roles are swapped on a cloned
  DrawInfo).  Round caps are added separately at open subpath endpoints.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /*
    The stroke outline is filled, so fill takes the stroke's color/pattern.
  */
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /*
          Open subpath: cap both endpoints.
        */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   A f f i n e   M a t r i x                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
% */
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Zero every term, then set the diagonal scale terms: the identity. */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent (no) stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* Inherit font/density/antialias settings from the cloned image info. */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Override the defaults from any global image options that were set. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept a symbolic weight (e.g. "bold"); fall back to a number. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  ssize_t
    i;

  /*
    Binomial coefficient n!/(k!*(n-k)!), computed incrementally so the
    intermediate products stay small (used for Bezier basis coefficients).
  */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
% */
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /* An arc is traced as an ellipse centered on the chord's midpoint. */
  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  /*
    SVG-style elliptical arc (rx ry x-rotation large-arc sweep x y),
    approximated by a chain of cubic Bezier segments.
  */
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Degenerate endpoints collapse to a point ... */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* ... and zero radii degrade to a straight line. */
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* Transform the endpoints into the rotated, radius-normalized frame. */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* Radii too small to span the endpoints: scale them up uniformly. */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* Pick the circle center consistent with the large-arc/sweep flags. */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* One cubic Bezier per quarter turn (or less). */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* First control point continues from the previous segment's endpoint. */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* Map control points back from the normalized frame to user space. */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Stamp the primitive type on every generated coordinate (walk back). */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* Scale the sampling density by the largest coordinate span. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may have reallocated; refresh the pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  /* Evaluate the Bernstein polynomial at evenly spaced parameter values. */
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  /* Radius is the distance from the center (start) to the edge (end). */
  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
*primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* Choose an angular step inversely proportional to the larger radius. */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  /* Normalize so the end angle is not less than the start angle. */
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the terminal point at the exact end angle. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Mark the subpath closed when the first and last points coincide. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  /* A zero-length line collapses to a single point primitive. */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  PrimitiveInfo
    *q;

  ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    Parse an SVG path string into point primitives.  Returns the number of
    coordinates generated, or -1 on error.  Lowercase path attributes use
    coordinates relative to the current point.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* Horizontal line to. */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
*/
        if (mvg_info->offset != subpath_offset)
          {
            /* Flush the previous subpath before starting a new one. */
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* Remember the subpath origin for a later close-path ('z'). */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth: first control point is reflected).
        */
        do
        {
          points[0]=points[3];
          /* Reflect the previous control point about the current point. */
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding curve command: degenerate to the current point. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth: control point is reflected).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Line to (vertical).
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* Multiple closed subpaths require border fill to honor holes. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}

static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /* Emit the four corners plus a closing copy of the start point. */
  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* A degenerate rectangle traces nothing. */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Clamp corner radii to half the rectangle's extent. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Trace the four corner arcs, one quadrant each. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* Close the outline by returning to the first traced point. */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  double
    dx,
    dy;

  ssize_t
    i;

  ssize_t
    j;

  /*
    Extend both ends of the polyline by `offset' along the direction of
    their end segments to form square line caps.
  */
  dx=0.0;
  dy=0.0;
  /* Find the first vertex that differs measurably from vertex 0. */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* Likewise from the tail end of the polyline. */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad  (6*BezierQuantum+360)
/*
  Grow (or on failure release) the stroke_p/stroke_q point buffers; on any
  allocation failure all working memory is released and NULL is returned.
*/
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  /* The subpath is closed when its endpoints coincide. */
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* Duplicate the second vertex so the closing join is rendered too. */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0*PerceptibleReciprocal(slope.p)); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,MaxStrokePad); RestoreMSCWarning dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: 
{ stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta. 
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid)))))); DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); RestoreMSCWarning stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p- theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid))))))); DisableMSCWarning(4127) CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); RestoreMSCWarning stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) 
(j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
vecadd_opt2.c
#include <stdio.h> #include <omp.h> #include "timer.h" // large enough to force into main memory #define ARRAY_SIZE 80000000 static double a[ARRAY_SIZE], b[ARRAY_SIZE], c[ARRAY_SIZE]; void vector_add(double *c, double *a, double *b, int n); int main(int argc, char *argv[]){ #pragma omp parallel if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads()); struct timespec tstart; double time_sum = 0.0; #pragma omp parallel for for (int i=0; i<ARRAY_SIZE; i++) { a[i] = 1.0; b[i] = 2.0; } cpu_timer_start(&tstart); vector_add(c, a, b, ARRAY_SIZE); time_sum += cpu_timer_stop(tstart); printf("Runtime is %lf msecs\n", time_sum); } void vector_add(double *c, double *a, double *b, int n) { #pragma omp parallel for for (int i=0; i < n; i++){ c[i] = a[i] + b[i]; } }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_binop__ne_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates every kernel for the NE (not-equal)
// binary operator on uint8_t inputs (bool output).  Each function body is a
// shared template pulled in via #include, specialized by the macros below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_uint8)
// A*D function (colscale):         GB (_AxD__ne_uint8)
// D*A function (rowscale):         GB (_DxB__ne_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_uint8)
// C=scalar+B                       GB (_bind1st__ne_uint8)
// C=scalar+B'                      GB (_bind1st_tran__ne_uint8)
// C=A+scalar                       GB (_bind2nd__ne_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__ne_uint8)

// C type:   bool
// A type:   uint8_t
// A pattern?  0
// B type:   uint8_t
// B pattern?  0

// BinaryOp: cij = (aij != bij)

// Type macros consumed by the shared templates:

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (when disabled, every kernel below compiles to a stub returning
// GrB_NO_VALUE so the generic fallback is used instead)
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_UINT8 || GxB_NO_NE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is not one of these, so the generator emits no ewise3-accum kernel
// for this operator; the stub below is intentionally compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NE cannot serve as an accumulator here, so the template include is
// compiled out (#if 0) and the function always reports success.

GrB_Info GB (_Cdense_accumB__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Likewise compiled out for NE: scalar accumulation does not apply.

GrB_Info GB (_Cdense_accumb__ne_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the operator's output type (bool), not A's type
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (values used in place of
    // missing entries of A and B respectively)
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (NE is commutative, so this branch is the one compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB (_bind1st_tran__ne_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (here both have the same type, so the redefinition is a no-op kept
    // for uniformity with other generated operators)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB (_bind2nd_tran__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP format pixel. Linking with zlib // required if this flag is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ compression when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_THREAD #define TINYEXR_USE_THREAD (0) // No threaded loading. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define
TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image(EXR 2.0) int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height in a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRBox2i { int min_x; int min_y; int max_x; int max_y; } EXRBox2i; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; EXRBox2i data_window; EXRBox2i display_window; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; int non_image; int multipart; unsigned int header_len; // Custom attributes(excludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Memory|File), then users // can edit it(only valid for HALF pixel type // channel) } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { For backward compatibility. Not recommended to use. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x height // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Loads single-frame OpenEXR image by specifying layer name. Assume EXR image // contains A(single channel alpha) or RGB(A) channels.
Application must free // image data as returned by `out_rgba` Result image format is: float x RGBA x // width x height Returns negative value and may set error string in `err` when // there's an error When the specified layer name is not found in the EXR file, // the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer infos from EXR file. // // @param[out] layer_names List of layer names. Application must free memory // after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(will be filled when the function returns error // code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCESS upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // height` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEFINED #define TINYEXR_IMPLEMENTATION_DEFINED #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #include <windows.h> // for UTF-8 #endif #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. 
Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Weverything" #endif #include "zfp.h" #ifdef __clang__ #pragma clang diagnostic pop #endif #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif /* miniz.c v1.15 - public domain 
deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occurred in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). 
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. 
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. 
* Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. 
It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. 
- Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. 
be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. 
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. 
Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purposely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. 
enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. 
is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) 
int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. 
// On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locate a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts an archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts an archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts an archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts an archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts an archive file to a disk file and sets its last accessed and // modified times. 
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
// See the block comment above: heap-allocating one-shot compressor; the caller
// owns (and must free()) the returned buffer.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);

// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

// Compressor limits: Huffman table/symbol counts and the 32KB LZ dictionary
// size mandated by deflate.
enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
// By convention here, negative status values are failures (mz_deflate() below
// treats any status < 0 as a stream error).
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
// All compressor state in one flat struct: output callback, probe limits,
// LZ dictionary, Huffman count/code/size tables, hash chains, and the output
// buffer. The struct is large (dominated by the 32KB+ dictionary and the
// 64KB LZ code buffer) — avoid placing it on small stacks.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

// Compile-time checks that the fixed-width typedefs have the expected sizes
// (a negative array size is a hard compile error).
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

// On little-endian targets that tolerate unaligned loads, read LE values
// directly; otherwise assemble them byte by byte.
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p)                          \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |     \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                           \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |      \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) |  \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

// Adler-32 of ptr[0..buf_len-1], seeded with 'adler'. A NULL ptr returns
// MZ_ADLER32_INIT so callers can obtain the initial seed. Work is done in
// blocks of 5552 bytes (the largest count for which the 32-bit sums cannot
// overflow before the modulo), with the inner loop unrolled 8x.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Two 4-bit (nibble) table lookups per input byte against a 16-entry table.
// A NULL ptr returns MZ_CRC32_INIT (the seed).
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}

void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zalloc/zfree used when the caller leaves mz_stream.zalloc/zfree
// NULL.
// NOTE(review): items * size may overflow size_t before reaching MZ_MALLOC;
// internal callers pass (1, sizeof(struct)) so this is benign here, but
// confirm before exposing it more widely.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

const char *mz_version(void) { return MZ_VERSION; }

// zlib-compatible deflateInit(): delegates to mz_deflateInit2 with the
// default window bits, mem_level 9 and default strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS,
                         9, MZ_DEFAULT_STRATEGY);
}

// zlib-compatible deflateInit2(): validates parameters, installs default
// allocators if needed, allocates the tdefl_compressor and initializes it.
// Only MZ_DEFLATED and +/-MZ_DEFAULT_WINDOW_BITS are accepted; mem_level is
// validated ([1,9]) but otherwise unused by tdefl.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}

// Resets the stream counters and re-initializes the compressor with the flags
// it was created with, so the stream can compress a new input.
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

// zlib-compatible deflate(): repeatedly feeds avail_in/avail_out to
// tdefl_compress(), updating the stream pointers, counters and running
// adler, until output fills up, the stream finishes, or no forward progress
// can be made. MZ_PARTIAL_FLUSH is treated as MZ_SYNC_FLUSH.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // A finished compressor can only legitimately be asked to finish again.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes,
                                 pStream->next_out, &out_bytes,
                                 (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}

// Frees the compressor state; safe to call on a stream whose state is already
// NULL.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// Upper bound on compressed size for a given source length.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

// One-shot compress with an explicit level: wraps a local mz_stream around
// deflateInit/deflate(MZ_FINISH)/deflateEnd. On success *pDest_len receives
// the compressed size.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len,
                 int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;
  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }
  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

// One-shot compress at the default compression level.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

// Streaming-inflate wrapper state: the low-level decompressor plus a 32KB
// circular dictionary that buffers decompressed bytes not yet delivered to
// the caller.
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

// zlib-compatible inflateInit2(): only +/-MZ_DEFAULT_WINDOW_BITS is accepted
// (positive = parse zlib header, negative = raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pDecomp;
  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;
  return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

// zlib-compatible inflate(). Fast path: MZ_FINISH on the very first call
// decompresses directly into the caller's buffer (non-wrapping). Otherwise
// output is produced into the internal 32KB dictionary and copied out, so
// arbitrary avail_out sizes work. Only 0, MZ_SYNC_FLUSH and MZ_FINISH are
// accepted as flush values (MZ_PARTIAL_FLUSH is mapped to MZ_SYNC_FLUSH).
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // First drain any decompressed bytes still pending in the dictionary.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

// Frees the inflate state; safe to call on a stream whose state is already
// NULL.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// One-shot decompress: wraps a local mz_stream around
// inflateInit/inflate(MZ_FINISH)/inflateEnd. MZ_BUF_ERROR with all input
// consumed is reported as MZ_DATA_ERROR (the stream was truncated).
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;
  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                            : status;
  }
  *pDest_len = stream.total_out;
  return mz_inflateEnd(&stream);
}

// Maps an MZ_* status code to a short description; returns NULL for unknown
// codes (note MZ_OK maps to the empty string, matching zlib's zError()).
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  mz_uint i;
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  return NULL;
}

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Low-level Decompression (completely independent from all
// compression API's)

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

// The TINFL_CR_* macros implement a switch-based coroutine: tinfl_decompress()
// can suspend at any TINFL_CR_RETURN point (recording the state index in
// r->m_state) and resume there on the next call by switching on r->m_state.
#define TINFL_CR_BEGIN     \
  switch (r->m_state) {    \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do {                                       \
    status = result;                         \
    r->m_state = state_index;                \
    goto common_exit;                        \
    case state_index:;                       \
  }                                          \
  MZ_MACRO_END
// Permanently parks the coroutine on 'result' — every subsequent call
// re-enters the infinite loop and returns the same status.
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do {                                               \
    for (;;) {                                       \
      TINFL_CR_RETURN(state_index, result);          \
    }                                                \
  }                                                  \
  MZ_MACRO_END
#define TINFL_CR_FINISH }

// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
// Reads the next input byte into 'c'. If the input buffer is exhausted:
// with TINFL_FLAG_HAS_MORE_INPUT the coroutine suspends (NEEDS_MORE_INPUT)
// and retries after resumption; without the flag, 'c' is padded with 0 (see
// the TODO above).
#define TINFL_GET_BYTE(state_index, c)                                 \
  do {                                                                 \
    if (pIn_buf_cur >= pIn_buf_end) {                                  \
      for (;;) {                                                       \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) {                             \
            c = *pIn_buf_cur++;                                        \
            break;                                                     \
          }                                                            \
        } else {                                                       \
          c = 0;                                                       \
          break;                                                       \
        }                                                              \
      }                                                                \
    } else                                                             \
      c = *pIn_buf_cur++;                                              \
  }                                                                    \
  MZ_MACRO_END

// Appends whole bytes to bit_buf (LSB-first) until it holds at least n bits.
#define TINFL_NEED_BITS(state_index, n)                \
  do {                                                 \
    mz_uint c;                                         \
    TINFL_GET_BYTE(state_index, c);                    \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);     \
    num_bits += 8;                                     \
  } while (num_bits < (mz_uint)(n))

// Discards the low n bits of bit_buf, refilling first if necessary.
#define TINFL_SKIP_BITS(state_index, n)    \
  do {                                     \
    if (num_bits < (mz_uint)(n)) {         \
      TINFL_NEED_BITS(state_index, n);     \
    }                                      \
    bit_buf >>= (n);                       \
    num_bits -= (n);                       \
  }                                        \
  MZ_MACRO_END

// Extracts the low n bits of bit_buf into 'b', refilling first if necessary.
#define TINFL_GET_BITS(state_index, b, n)  \
  do {                                     \
    if (num_bits < (mz_uint)(n)) {         \
      TINFL_NEED_BITS(state_index, n);     \
    }                                      \
    b = bit_buf & ((1 << (n)) - 1);        \
    bit_buf >>= (n);                       \
    num_bits -= (n);                       \
  }                                        \
  MZ_MACRO_END

// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
// See the comment above. Expands the variables temp/code_len/c declared by
// the enclosing TINFL_HUFF_DECODE() expansion.
// NOTE(review): unlike the other macros this one ends with its own trailing
// semicolon (the do/while is a real loop, not the do{}while(0) idiom), so it
// is only safe inside a braced block — which is how TINFL_HUFF_DECODE() below
// uses it.
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                       \
  do {                                                                   \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)];   \
    if (temp >= 0) {                                                     \
      code_len = temp >> 9;                                              \
      if ((code_len) && (num_bits >= code_len)) break;                   \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) {                      \
      code_len = TINFL_FAST_LOOKUP_BITS;                                 \
      do {                                                               \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];   \
      } while ((temp < 0) && (num_bits >= (code_len + 1)));              \
      if (temp >= 0) break;                                              \
    }                                                                    \
    TINFL_GET_BYTE(state_index, c);                                      \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                       \
    num_bits += 8;                                                       \
  } while (num_bits < 15);

// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = 
pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; 
counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, 
TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = 
s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while ((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } 
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? 
(mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. 
static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. 
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;

// Sorts a tdefl_sym_freq[] array ascending by its 16-bit m_key using a
// two-pass (low byte, then high byte) counting sort. pSyms0 holds the input;
// pSyms1 is scratch of the same size. Returns whichever of the two buffers
// holds the sorted result after the final pass.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 total_passes = 2, shift, pass, i;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;

  // Build both byte histograms (low byte in hist[0..255], high byte in
  // hist[256..511]) in one sweep over the input.
  MZ_CLEAR_OBJ(hist);
  for (i = 0; i < num_syms; i++) {
    mz_uint k = pSyms0[i].m_key;
    hist[k & 0xFF]++;
    hist[256 + ((k >> 8) & 0xFF)]++;
  }

  // If all keys share the same high byte (hist bucket 0 of the second pass
  // holds every symbol), the second pass would be a no-op — skip it.
  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
    total_passes--;

  for (pass = 0, shift = 0; pass < total_passes; pass++, shift += 8) {
    const mz_uint32 *pCounts = &hist[pass << 8];
    mz_uint offsets[256], ofs = 0;
    tdefl_sym_freq *pTmp;

    // Exclusive prefix sum: turn counts into each bucket's start offset.
    for (i = 0; i < 256; i++) {
      offsets[i] = ofs;
      ofs += pCounts[i];
    }

    // Stable scatter of symbols into their buckets for this byte.
    for (i = 0; i < num_syms; i++)
      pDst[offsets[(pSrc[i].m_key >> shift) & 0xFF]++] = pSrc[i];

    // Ping-pong the source/destination buffers between passes.
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place minimum-redundancy (Huffman) code-length calculation, per the
// Moffat/Katajainen algorithm credited above. On entry A[0..n-1] must be
// sorted ascending by m_key, where m_key holds each symbol's frequency (the
// caller, tdefl_optimize_huffman_table, radix-sorts first). On exit each
// A[i].m_key holds the symbol's Huffman code length. The array is reused for
// three successive meanings (weights -> parent indices -> depths), which is
// why no extra storage is needed.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    // A single symbol still needs a 1-bit code.
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place. Internal-node weights overwrite
  // A[next].m_key; once a node is merged, its m_key is replaced by the index
  // of its parent. 'root' walks the internal nodes, 'leaf' the remaining
  // leaves, always merging the two smallest available weights.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // Pick the smaller of (next internal node, next leaf) as the first child.
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;  // store parent index
    } else
      A[next].m_key = A[leaf++].m_key;
    // ...and again for the second child, accumulating the combined weight.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;  // store parent index
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent indices into depths, walking from the root
  // (A[n-2], depth 0) back down; each node's depth is its parent's depth + 1.
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: convert internal-node depths into leaf code lengths, assigning
  // the counted leaves per depth level from deepest entries backward.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < 
table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 
17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); 
TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) 
flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; 
bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, 
  /* A stored (raw) block must be used when forced by flags AND the bytes to
     emit are still present in the dictionary window. */
  use_raw_block =
      ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
      (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;

  /* Write straight into the caller's output buffer when it is big enough;
     otherwise stage into the internal buffer and copy out afterwards. */
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;

  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;

  /* Finalize the partially-filled flag byte; drop it entirely if unused. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

  /* Emit the two-byte zlib header (0x78 0x01) before the very first block. */
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }

  /* BFINAL bit. */
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);

  /* Snapshot output state so we can roll back if compression expands data. */
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;

  if (!use_raw_block)
    comp_block_succeeded = tdefl_compress_block(
        d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
               (d->m_total_lz_bytes < 48));

  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    /* Stored-block length fields: LEN then ~LEN (the XOR toggles them). */
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the
  // compressed block not fitting into the output buffer when using dynamic
  // codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }

  if (flush) {
    if (flush == TDEFL_FINISH) {
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      /* Trailing big-endian Adler-32 checksum for zlib streams. */
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      /* Non-final flush: append an empty stored block so the decompressor
         can resynchronize on a byte boundary. */
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }

  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

  /* Reset per-block state for the next block. */
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;

  /* Deliver whatever was produced: via callback, or copy staged bytes into
     the caller's buffer (remembering any remainder for later flushing). */
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }

  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
/* Read a 16-bit word from a possibly unaligned address. NOTE(review): plain
   pointer cast — assumes the target tolerates unaligned 16-bit loads (this
   whole branch is gated on MINIZ_USE_UNALIGNED_LOADS_AND_STORES). */
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)

/* Find the longest dictionary match for the bytes at lookahead_pos by
   walking the hash chains. Only matches longer than the incoming *pMatch_len
   are reported; on success *pMatch_dist/*pMatch_len are updated. */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  /* c01: the two bytes that must match at the end of any longer candidate;
     s01: the first two bytes of the string being matched. */
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    /* Compare two bytes at a time until a mismatch or probe_len runs out. */
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      /* Ran the full comparison without mismatch: maximum-length match. */
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
/* Portable byte-at-a-time variant of tdefl_find_match (no unaligned loads). */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  /* Cache hot compressor state in locals for the duration of the loop. */
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;

  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    /* Refill the lookahead/dictionary ring from the source buffer. */
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      /* Mirror the first MAX_MATCH_LEN-1 bytes past the end of the ring so
         match comparisons never have to wrap. */
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;

    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      /* Hash the next three bytes and probe the one-entry hash table. */
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;

      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        /* Extend the match two bytes at a time. */
        do {
        } while (
            (TDEFL_READ_UNALIGNED_WORD(++p) ==
             TDEFL_READ_UNALIGNED_WORD(++q)) &&
            (TDEFL_READ_UNALIGNED_WORD(++p) ==
             TDEFL_READ_UNALIGNED_WORD(++q)) &&
            (TDEFL_READ_UNALIGNED_WORD(++p) ==
             TDEFL_READ_UNALIGNED_WORD(++q)) &&
            (TDEFL_READ_UNALIGNED_WORD(++p) ==
             TDEFL_READ_UNALIGNED_WORD(++q)) &&
            (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;

        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          /* Match too short (or short+far) to be worthwhile: emit literal. */
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          /* LZ record: length byte + 16-bit distance (unaligned store ok —
             this whole function is gated on the unaligned/LE config). */
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      /* LZ code buffer nearly full: write state back and flush a block. */
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }

    /* Fewer than 4 bytes of lookahead left: emit them as literals. */
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }

  /* Write the cached parse state back into the compressor. */
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

/* Append one literal byte to the LZ code buffer and update its histogram. */
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

/* Append a (len, dist) match record to the LZ code buffer and update the
   length/distance symbol histograms. */
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
  s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

/* Normal (non-fast-path) deflate parser: hash-chain match search with
   optional lazy one-byte-lookahead parsing. Returns MZ_FALSE if a flush
   failed, MZ_TRUE otherwise. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      /* Rolling two-byte hash, extended per inserted byte below. */
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        /* Mirror the ring's first bytes past its end (wrap-free matching). */
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      /* Tiny input: insert bytes one at a time until hashing can start. */
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;

    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      /* RLE-only mode: match solely against the immediately preceding byte. */
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    /* Reject marginal matches: short+far, self-overlap at stream start, or
       filtered-mode short matches. */
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      /* Lazy parsing: compare against the match deferred last iteration. */
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      /* Defer this match; next iteration decides if a longer one follows. */
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

/* Drain pending staged output into the caller's buffer and report whether
   the stream is finished. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
      TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
}

/* Main streaming entry point. Compresses up to *pIn_buf_size bytes from
   pIn_buf into pOut_buf (or via the put-buf callback); both size parameters
   are updated in place with the amounts consumed/produced. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  /* Validate: exactly one of callback / output-buffer modes must be active,
     and the stream must still be healthy. */
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  /* Fast path: single probe, greedy parsing, no filter/raw/RLE flags. */
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  /* Fold the consumed input into the running adler32 when needed. */
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      /* Full flush resets matching state so the next block is independent. */
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

/* Convenience wrapper for callback-based output (no output buffer). */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

/* Initialize (or reset) a compressor. `flags` combines TDEFL_* option bits;
   its low 12 bits select the maximum probe count. */
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

/* One-shot: compress a memory buffer through a put-buf callback. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

/* Growable (or fixed-capacity) output sink used by the mem-to-* helpers. */
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    /* Double the capacity (starting at 128) until the new data fits. */
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

/* Compress pSrc_buf into a newly allocated heap buffer. Caller owns the
   returned pointer (free with MZ_FREE). Returns NULL on failure. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

/* Compress into a caller-provided buffer; returns 0 on failure or if the
   output doesn't fit. */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif  // MINIZ_NO_ZLIB_APIS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, 
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,

  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

// Simple growable byte/element array, allocated through the archive's
// user-pluggable allocator callbacks.
typedef struct {
  void *m_p;
  size_t m_size, m_capacity;
  mz_uint m_element_size;
} mz_zip_array;

// Internal reader/writer state: the raw central directory bytes, per-entry
// offsets into it, an optional sorted index for fast lookup, and the backing
// store (either a FILE* or an in-memory blob).
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;
  mz_zip_array m_central_dir_offsets;
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;
  void *m_pMem;
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

// Frees the array's storage via the archive's free callback and zeroes it.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

// Grows the array to hold at least min_new_capacity elements. When `growing`
// is set the capacity doubles (amortized O(1) appends); otherwise it is sized
// exactly. Returns MZ_FALSE on allocation failure (array left intact).
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

// Ensures capacity without changing the logical size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Sets the logical size, growing the backing storage if needed. Note: does
// not shrink or zero newly exposed elements.
static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size,
                    mz_uint growing) {
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

// Reserves room for n more elements beyond the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

// Appends n elements (copied from pElements) to the end of the array.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Converts MS-DOS packed date/time fields (as stored in ZIP headers) to a
// time_t via mktime(). DOS time has 2-second resolution, hence the <<1.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

// Converts a time_t to MS-DOS packed date/time. On localtime failure (MSVC
// path) both outputs are zeroed.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
// Reads a file's mtime and converts it to DOS date/time. With MINIZ_NO_TIME
// the outputs are simply zeroed and the call always succeeds.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Sets a file's access and modification times via utime().
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO

// Shared reader setup: validates the archive object is unused, installs
// default allocator callbacks where the caller left them NULL, and allocates
// the zeroed internal state with element sizes configured for each array.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Case-insensitive "less than" comparison of two central-directory entries'
// filenames, used as the ordering predicate for the sorted-offsets heap sort.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets,
mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // Filenames immediately follow the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Equal prefix: the shorter name sorts first; otherwise compare the first
  // differing (lowercased) byte.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  // Phase 1: heapify (sift down each internal node, last parent first).
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      // Pick the greater of the two children.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }

  // Phase 2: repeatedly move the max to the end and re-sift the root.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}

// Locates, reads, and validates the end-of-central-directory record, then
// loads and index-checks the whole central directory.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);

  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;

  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up if we've scanned past the maximum possible EOCD position
    // (64KB comment limit plus the fixed record size).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back, overlapping by 3 bytes so a signature straddling the
    // previous buffer boundary is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // The total entry count must equal the on-this-disk count (no multi-disk
  // spanning support).
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;

  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  // Disk numbers must both be 0, or both 1 (some writers use 1-based).
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;

  // The directory must be at least big enough to hold the fixed headers.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;

  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;

  pZip->m_central_directory_file_ofs = cdir_ofs;

  if (pZip->m_total_files) {
    mz_uint i, n;

    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;

    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }

    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;

    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p -
                      (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes indicate zip64 records, which are rejected.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size =
               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }

  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

  return MZ_TRUE;
}

// Initializes a reader over a caller-supplied m_pRead callback spanning
// `size` bytes, then parses the central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// m_pRead implementation for in-memory archives: a bounds-clamped memcpy.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
                 ? 0
                 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}

// Initializes a reader directly over an in-memory archive image. The memory
// is borrowed, not copied: pMem must outlive the reader.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pRead implementation for file-backed archives; seeks only when the file
// position isn't already at file_ofs.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Opens pFilename for binary reading and initializes a reader over it.
// The FILE* is owned by the reader state afterwards.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Returns the number of entries in the archive (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ? pZip->m_total_files : 0;
}

// Returns a pointer to the raw central-directory header for file_index, or
// NULL if the index/state is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

// True if the entry's general-purpose bit 0 (traditional encryption) is set.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

// True if the entry looks like a directory: filename ends in '/' or the DOS
// directory attribute bit is set in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;

  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }

  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0) return MZ_TRUE;

  return MZ_FALSE;
}

// Fills pStat with all the metadata for one entry, decoded from its
// central-directory record.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;

  // Unpack the central directory record.
pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);

  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';

  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and the extra field in the record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';

  return MZ_TRUE;
}

// Copies the entry's filename (NUL-terminated, possibly truncated) into
// pFilename and returns the full length + 1 needed to hold it.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Compares len bytes of two strings, case-sensitively only when the
// MZ_ZIP_FLAG_CASE_SENSITIVE flag is set.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
  return MZ_TRUE;
}

// Three-way, case-insensitive comparison of a central-directory entry's
// filename against an external string; used by the binary search.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  return (pL == pE) ?
(int)(l_len - r_len) : (l - r);
}

// Binary search over the sorted-by-filename index; returns the file index or
// -1 when not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Finds an entry by name (and optionally comment); returns its index or -1.
// Uses the sorted index when possible, otherwise a linear scan honoring
// MZ_ZIP_FLAG_IGNORE_PATH / MZ_ZIP_FLAG_CASE_SENSITIVE.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: binary search requires default flags, no comment filter, and
  // a previously built sorted index.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) ==
       0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip any directory prefix before comparing.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts one entry into a caller-supplied buffer, optionally using a
// caller-supplied scratch read buffer (file-backed archives only).
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                         read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf)) return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // The payload starts after the local header's variable-length fields.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, 
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? 
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // The compressed data begins after the local header, filename and extras.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Memory-backed archive: read the compressed bytes in place.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
// Whole-entry callback requires the size to fit in size_t on 32-bit targets.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // File-backed archive: pump the stored bytes through the callback in
      // read_buf_size chunks.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflated entry: inflate through a dictionary-sized ring buffer,
    // flushing each produced window of output to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);

    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position within the circular dictionary buffer.
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          // Refill the compressed-input buffer from the archive.
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against output exceeding the declared uncompressed size.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }

  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  // pRead_buf aliases the archive memory when m_pMem is set; only free it
  // when it was allocated above.
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

  return status == TINFL_STATUS_DONE;
}

// Locate a file by name and stream it to a write callback.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// Write callback adapter: pOpaque is a MZ_FILE*; the offset is ignored
// because extraction emits bytes strictly in order.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extract a file (by index) to a disk file at pDst_filename.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename,
                   "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A failed fclose means data may not have reached disk.
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // Propagate the archive's recorded timestamp onto the extracted file.
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Tear down a reader: release the central directory arrays, close any
// backing stdio file, and free the internal state.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Locate a file by name and extract it to a disk file.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Little-endian serialization helpers for header fields.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initialize an archive for writing via the caller-installed m_pWrite
// callback. existing_size seeds m_archive_size (bytes already present
// before the first entry).
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  // Fall back to the default allocator callbacks when none are provided.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// m_pWrite implementation for heap-backed archives: grows the in-memory
// block (doubling capacity) and copies the bytes at file_ofs.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
// Reject zero-length writes and sizes unrepresentable in a 32-bit size_t.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

// Initialize a writer that builds the archive in a growable heap block.
// Optionally reserves size_to_reserve_at_beginning bytes before entry data
// and pre-allocates initial_allocation_size bytes of capacity.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pWrite implementation for stdio-backed archives: seeks only when the
// target offset differs from the current position, then writes.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Initialize a writer that streams the archive to a disk file, optionally
// zero-filling a reserved region at the start of the file.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Zero-fill the reserved prefix in 4 KiB chunks.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Convert an archive opened for reading into one that can append new
// entries. pFilename is only needed when the reader is stdio-backed (the
// file must be reopened writable).
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Statement-expression reference keeps pFilename "used" when stdio
    // support is compiled out.
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}

// Add a memory buffer to the archive with default (no comment, computed
// CRC) settings. level_and_flags combines a compression level (low 4 bits)
// with MZ_ZIP_FLAG_* bits.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

// State threaded through the tdefl put-buf callback while compressing an
// entry directly into the archive.
typedef struct {
  mz_zip_archive *m_pZip;               // destination archive
  mz_uint64 m_cur_archive_file_ofs;     // next write offset in the archive
  mz_uint64 m_comp_size;                // compressed bytes emitted so far
} mz_zip_writer_add_state;

// tdefl output callback: append a chunk of compressed data to the archive
// and advance the running offset/size counters.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Serialize a ZIP local directory header into pDst
// (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // Version needed: 20 when deflate is used, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
                                                                20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Serialize a ZIP central directory file header into pDst
// (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes).
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // Version needed: 20 when deflate is used, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Build a central directory record for a newly written entry and append it
// (header + filename + extras + comment + offset) to the in-memory central
// directory arrays, rolling back on failure.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/')
    return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
      return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Number of padding bytes required so the next local header starts on the
// configured m_file_offset_alignment boundary (0 when alignment is off).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment)
    return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Write n zero bytes to the archive at cur_file_ofs (used for padding and
// to reserve space for headers written later).
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Add a memory buffer to the archive. If MZ_ZIP_FLAG_COMPRESSED_DATA is set
// the buffer is assumed to already be deflated and uncomp_size/uncomp_crc32
// describe the original data; otherwise the CRC and size are computed here
// and the data is deflated at the requested level (0 = stored).
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64
      local_dir_header_ofs = pZip->m_archive_size,
      cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;

  // Negative level_and_flags selects the library default level.
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;

  pState = pZip->m_pState;

  // uncomp_size may only be supplied with pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

#ifndef MINIZ_NO_TIME
  // Stamp the entry with the current time in DOS format.
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }

  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;

  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }

  // Reserve space for alignment padding plus the local header; the header
  // itself is written later, once the compressed size and CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }

  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);

  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;

  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Raw input: compute the CRC here. Tiny payloads are stored, not
    // deflated (compression can't help at <= 3 bytes).
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }

  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed input keeps the deflate method in the headers.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate the buffer straight into the archive via the put-buf callback.
    mz_zip_writer_add_state state;

    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;

    // -15 window bits = raw deflate (no zlib header), as ZIP requires.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }

    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }

  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;

  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;

  // Now that sizes and CRC are final, fill in the reserved local header.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;

  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Add a file from disk to the archive, streaming it through a fixed-size
// read buffer (stored when level is 0, deflated otherwise).
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input is not supported by this entry point.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return
        MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  // Use the source file's modification time for the entry's DOS timestamp.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;

  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);

  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny files are stored; deflate can't shrink <= 3 bytes.
  if (uncomp_size <= 3)
    level = 0;

  // Reserve alignment padding plus space for the local header (rewritten
  // later once the compressed size and CRC are known).
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);

  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;

  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }

    if (!level) {
      // Stored path: copy the file through in MZ_ZIP_MAX_IO_BUF_SIZE chunks,
      // accumulating the CRC as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                            pRead_buf, n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate path: stream chunks through tdefl, which writes compressed
      // output directly to the archive via the put-buf callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }

      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;

      // -15 window bits = raw deflate stream (no zlib header), per ZIP spec.
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }

      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;

        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;

        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;

        // Finish the stream on the last chunk; otherwise keep feeding.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }

      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);

      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }

      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;

      method = MZ_DEFLATED;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }

  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;

  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;

  // Sizes and CRC are now known: go back and write the real local header.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;

  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Copy an entry verbatim (still compressed) from a source archive into this
// one, duplicating its local header, data, optional data descriptor, and
// central directory record.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // Local header staged in a mz_uint32 array to guarantee alignment.
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if
      (NULL ==
       (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;

  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;

  // Read and validate the source entry's local header.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }

  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

  // Copy filename + extras + the compressed payload in one pass.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);

  // The buffer must also be large enough for a possible data descriptor
  // (4 x mz_uint32) copied below.
  if (NULL ==
      (pBuf = pZip->m_pAlloc(
           pZip->m_pAlloc_opaque, 1,
           (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                          MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                 comp_bytes_remaining)))))
    return MZ_FALSE;

  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;

    comp_bytes_remaining -= n;
  }

  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }

    // The descriptor is 4 words when it begins with the optional
    // 0x08074b50 signature, otherwise 3 (CRC, comp size, uncomp size).
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }

    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);

  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;

  orig_central_dir_size = pState->m_central_dir.m_size;

  // Clone the source central directory record, patching only the local
  // header offset to the entry's new location in this archive.
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;

  // Append the source record's trailing filename/extras/comment bytes.
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;

  return MZ_TRUE;
}

// Write the central directory and the end-of-central-directory record,
// completing the archive. After this the writer may only be ended.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;

  pState = pZip->m_pState;

  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;

  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }

  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);

  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO

  pZip->m_archive_size += sizeof(hdr);

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}

// Finalize a heap-backed archive and hand the in-memory block to the
// caller (definition continues beyond this chunk).
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
    return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
  Anyone is free to copy, modify, publish, use, compile, sell, or
  distribute this software, either in source code form or as a compiled
  binary, for any purpose, commercial or non-commercial, and by any
  means.

  In jurisdictions that recognize copyright laws, the author or authors
  of this software dedicate any and all copyright interest in the
  software to the public domain. We make this dedication for the benefit
  of the public at large and to the detriment of our heirs and
  successors. We intend this dedication to be an overt act of
  relinquishment in perpetuity of all present and future rights to this
  software under copyright law.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  OTHER DEALINGS IN THE SOFTWARE.

  For more information, please refer to <http://unlicense.org/>
*/

// ---------------------- end of miniz ----------------------------------------

#ifdef __clang__
#pragma clang diagnostic pop
#endif

}  // namespace miniz
#else

// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ 
#pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(float *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else float tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// Bit-field view of an IEEE-754 single-precision float (sign/exponent/
// mantissa), used by the half<->float converters below.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-field view of an IEEE-754 half-precision float.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// half -> float conversion (branchless trick from
// https://gist.github.com/rygorous/2156668); handles Inf/NaN and denormals.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// float -> half conversion with round-to-nearest; overflow goes to infinity,
// underflow produces denormals/zero.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Reads a NUL-terminated string of at most `len` bytes starting at `ptr` into
// *s. Returns the pointer just past the terminator, or NULL (with *s cleared)
// when no terminator is found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parses one EXR header attribute (name\0 type\0 length payload) from a
// buffer of `size` bytes at `marker`. On success fills name/type/data and the
// total number of bytes consumed (*marker_size). Returns false on truncated
// or malformed input; an attribute with length 0 is only accepted for the
// "string" type (stored as a single '\0').
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.

      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);

      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);

      data->resize(1);
      (*data)[0] = '\0';

      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Serializes one attribute (name\0 type\0 little-endian length, then payload)
// and appends it to *out.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(&outLen);
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// One EXR channel description as read from / written to the "channels"
// header attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// Inclusive 2D integer box (EXR box2i).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;

// Parsed EXR header fields; clear() resets everything to zero defaults.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  void clear() {
    channels.clear();
    attributes.clear();

    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
};

// Parses the "channels" attribute payload: a sequence of
// (name\0, pixel_type, p_linear + 3 reserved bytes, x/y sampling) records
// terminated by an empty name. Bounds-checked against `data` (issue #51).
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);

    channels.push_back(info);
  }

  return true;
}

// Serializes the channel list into the "channels" attribute layout
// (little-endian fields, trailing '\0' terminator record).
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

// EXR "ZIP" compression: byte-interleave + delta predictor preprocessing
// (as in OpenEXR's ImfZipCompressor.cpp), then deflate via miniz/zlib.
// Falls back to storing the raw input when compression doesn't shrink it.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressZip: inflate, undo the delta predictor, then
// de-interleave the two byte halves back into `dst`.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    // Extend the run while bytes repeat (bounded by MAX_RUN_LENGTH).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      // Negative count marks a literal (verbatim) run.
      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: -(count) bytes follow verbatim.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0))
        return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeat run: one byte repeated count+1 times.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1))
        return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// EXR "RLE" compression: same byte-interleave + delta predictor
// preprocessing as CompressZip, then OpenEXR run-length encoding. Falls back
// to storing the raw input when compression doesn't shrink it.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressRle: run-length decode, undo the delta predictor, then
// de-interleave the two byte halves back into `dst`.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel working state for the PIZ codec.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//  16-bit Haar Wavelet encoding and decoding
//
//  The source code in this file is derived from the encoding
//  and decoding routines written by Christian Rouet for his
//  PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short as = static_cast<short>(a);
  short bs = static_cast<short>(b);

  short ms = (as + bs) >> 1;
  short ds = as - bs;

  l = static_cast<unsigned short>(ms);
  h = static_cast<unsigned short>(ds);
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short ls = static_cast<short>(l);
  short hs = static_cast<short>(h);

  int hi = hs;
  int ai = ls + (hi & 1) + (hi >> 1);

  short as = static_cast<short>(ai);
  short bs = static_cast<short>(ai - hi);

  a = static_cast<unsigned short>(as);
  b = static_cast<unsigned short>(bs);
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward 16-bit Haar step with modulo arithmetic (full 16-bit range safe).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16(): reconstructs the original pair modulo 2^16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//
// In-place hierarchical 2D Haar transform over an nx-by-ny grid stored with
// strides ox (x) and oy (y). Uses the exact 14-bit basis when all values fit
// in 14 bits (mx < 1<<14), otherwise the modulo 16-bit basis.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset (stride)
    int ny,              // i : y size
    int oy,              // i : y offset (stride)
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//
// Exact inverse of wav2Encode(); walks the levels from coarsest to finest.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset (stride)
    int ny,              // i : y size
    int oy,              // i : y offset (stride)
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//  16-bit Huffman compression and decompression.
//
//  The source code in this file is derived from the 8-bit
//  Huffman compression and decompression routines written
//  by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

struct HufDec {  // short code       long code
  //-------------------------------
  unsigned int len : 8;   // code length    0
  unsigned int lit : 24;  // lit            p size
  unsigned int *p;        // 0              lits
};

// Encoding-table entries pack (code << 6) | length into one long long.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append nBits bits to the output stream; c/lc form the bit accumulator.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

// Read nBits bits from the input stream; c/lc form the bit accumulator.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Min-heap ordering on the pointed-to frequencies (least frequent on top).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //

        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      int zerun = 1;

      // Collapse a run of zero-length codes into a single run token.
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
//

static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary-entry array by one (no realloc optimization;
        // long codes are rare).
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // runCount is the number of *additional* repeats, so emit runCount+1
    // copies in total.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : number of input values (16-bit words)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // 
std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; 
hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), 
static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { 
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0; } }; static bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes, std::string *err) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) { if (attributes[i].size == 1) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; break; } else { if (err) { (*err) += "zfpCompressionType attribute must be uchar(1 byte) type.\n"; } return false; } } } if (!foundType) { if (err) { (*err) += "`zfpCompressionType` attribute not found.\n"; } return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionRate` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, 
"zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionPrecision` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionTolerance` attribute not found.\n"; } } else { if (err) { (*err) += "Unknown value specified for `zfpCompressionType`.\n"; } } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, size_t num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = size_t(dst_width) * size_t(dst_num_lines) * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, static_cast<unsigned int>(dst_width), static_cast<unsigned int>(dst_num_lines) * static_cast<unsigned int>(num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = size_t(dst_width) * size_t(dst_num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // decompress 4x4 pixel block. for (size_t y = 0; y < size_t(dst_num_lines); y += 4) { for (size_t x = 0; x < size_t(dst_width); x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
static bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, static_cast<unsigned int>(width), static_cast<unsigned int>(num_lines * num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = size_t(width) * size_t(num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // compress 4x4 pixel block. for (size_t y = 0; y < size_t(num_lines); y += 4) { for (size_t x = 0; x < size_t(width); x += 4) { float fblock[16]; for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp)); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. 
static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * 
static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { 
image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int 
**>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; std::string e; if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. assert(0); return false; } // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < 
static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
return false; } unsigned int val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } return true; } static bool DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (tile_size_x > data_width || tile_size_y > data_height || tile_size_x * tile_offset_x > data_width || tile_size_y * tile_offset_y > data_height) { return false; } // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. 
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. 
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } #ifdef _WIN32 static inline std::wstring UTF8ToWchar(const std::string &str) { int wstr_size = MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0); std::wstring wstr(wstr_size, 0); MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0], (int)wstr.size()); return wstr; } #endif static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window.min_x = 0; info->data_window.min_y = 0; info->data_window.max_x = 0; info->data_window.max_y = 0; info->line_order = 0; // @fixme info->display_window.min_x = 0; info->display_window.min_y = 0; info->display_window.max_x = 0; info->display_window.max_y = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, 
&data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) || y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) { if (err) { (*err) = "Tile sizes were invalid."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window.min_x, &data.at(0), sizeof(int)); 
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->data_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->data_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->data_window.min_x); tinyexr::swap4(&info->data_window.min_y); tinyexr::swap4(&info->data_window.max_x); tinyexr::swap4(&info->data_window.max_y); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->display_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->display_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->display_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->display_window.min_x); tinyexr::swap4(&info->display_window.min_y); tinyexr::swap4(&info->display_window.max_x); tinyexr::swap4(&info->display_window.max_y); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4(&info->pixel_aspect_ratio); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4(&info->screen_window_center[0]); tinyexr::swap4(&info->screen_window_center[1]); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4(&info->screen_window_width); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, 
&data.at(0), sizeof(int)); tinyexr::swap4(&info->chunk_count); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. 
// ConvertHeader: copies a parsed C++ HeaderInfo into the public C EXRHeader
// struct. Scalar fields (windows, aspect ratio, line order, compression, tile
// parameters) are copied directly; channel descriptions are deep-copied into a
// malloc'd EXRChannelInfo array that the caller must release via
// FreeEXRHeader().
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window.min_x = info.display_window.min_x; exr_header->display_window.min_y = info.display_window.min_y; exr_header->display_window.max_x = info.display_window.max_x; exr_header->display_window.max_y = info.display_window.max_y; exr_header->data_window.min_x = info.data_window.min_x; exr_header->data_window.min_y = info.data_window.min_y; exr_header->data_window.max_x = info.data_window.max_x; exr_header->data_window.max_y = info.data_window.max_y; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety.
// Remainder of ConvertHeader: NUL-terminate channel names, copy per-channel
// pixel type / p_linear / sampling, build the pixel_types array and seed
// requested_pixel_types with the same values, then clamp and shallow-copy
// custom attributes (value pointers are shared, not duplicated) up to
// TINYEXR_MAX_CUSTOM_ATTRIBUTES. The tail of this line opens DecodeChunk(),
// which decodes every tile/scanline chunk of one EXR part into exr_image
// (body continues on the next line; the signature is split mid-parameter by
// the corrupted line wrapping).
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t
// DecodeChunk body (signature continues from the previous line). Derives the
// scanlines-per-block count from the compression type (ZIP/ZFP: 16, PIZ: 32,
// otherwise 1), validates the data window, rejects dimensions above an
// 8M-pixel-per-axis heuristic threshold, and computes the per-channel byte
// layout of a decoded scanline before branching on tiled vs scanline format.
size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, exr_header->custom_attributes, int(exr_header->num_custom_attributes), err)) { return TINYEXR_ERROR_INVALID_HEADER; } #endif } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_y < exr_header->data_window.min_y) { if (err) { (*err) += "Invalid data window.\n"; } return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if ((data_width > threshold) || (data_height > threshold)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
// Tiled path: reject negative tile sizes, calloc one EXRTile per chunk
// offset, then decode tiles either on a C++11 thread pool (one worker per
// hardware thread, tiles claimed via an atomic counter) or in a plain serial
// loop for pre-C++11 builds.
if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); int err_code = TINYEXR_SUCCESS; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<size_t> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { size_t tile_idx = 0; while ((tile_idx = tile_count++) < num_tiles) { #else for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { #endif // Allocate memory for each tile.
// Per-tile decode: allocate channel images, bounds-check the chunk offset
// against the file size, read the 16-byte tile coordinates plus 4-byte data
// length (byte-swapped), reject nonzero LoD levels (mipmaps unsupported),
// validate data_len, then hand the compressed payload to
// DecodeTiledPixelData.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data size.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(&tile_coordinates[0]); tinyexr::swap4(&tile_coordinates[1]); tinyexr::swap4(&tile_coordinates[2]); tinyexr::swap4(&tile_coordinates[3]); // @todo{ LoD } if (tile_coordinates[2] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } if (tile_coordinates[3] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(&data_len); if (data_len < 4 || size_t(data_len) > data_size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data length.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; bool ret = tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list);
// Tile loop epilogue (record tile offsets/levels, join workers, propagate
// err_code) followed by the scanline-format branch: guard against zero-size
// or >=256GB element counts (issue #104 workaround, 64-bit builds only),
// allocate the full-image channel buffers, and fan scanline blocks out over
// a thread pool (C++11), OpenMP, or a serial loop.
if (!ret) { // TODO(LTE): atomic if (err) { (*err) += "Failed to decode tile data.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; } exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) } })); } // num_thread loop for (auto &t : workers) { t.join(); } #else } #endif if (err_code != TINYEXR_SUCCESS) { return err_code; } exr_image->num_tiles = static_cast<int>(num_tiles); } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> y_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_blocks)) { num_threads = int(num_blocks); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int y = 0; while ((y = y_count++) < int(num_blocks)) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { #endif size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4
// NOTE(review): this line starts with the stray tokens "byte: scan line" —
// the tail of a "// 4 byte: scan line" comment severed by the corrupted line
// wrapping above. Per-scanline-block decode: read line_no and data_len from
// the chunk header (byte-swapped), sanity-check data_len and line_no (±2^21
// heuristic bound), clamp the block to data_window.max_y, rebase line_no to
// the data window with 64-bit overflow checks, and call DecodePixelData;
// any failure sets the shared invalid_data flag.
byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(&line_no); tinyexr::swap4(&data_len); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example // `data_len < 4` invalid_data = true; } else { // line_no may be negative. int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window.max_y + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window.min_y); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window.min_y; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } #if (__cplusplus > 199711L) &&
// DecodeChunk epilogue: join scanline workers, report invalid_data, overwrite
// pixel_types with requested_pixel_types (the decode already converted), and
// publish the image dimensions. Then ReconstructLineOffsets(): rebuilds a
// zeroed/corrupt offset table by walking successive chunk headers
// (y, data_len) from `marker`, bounds-checking each step (NOTE(review): the
// data_len range check runs before swap4, so on big-endian hosts it tests the
// unswapped value — confirm intent). The tail of this line opens
// DecodeEXRImage(), which validates arguments and recomputes the
// scanlines-per-block count.
(TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(&y); tinyexr::swap4(&data_len); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type ==
// DecodeEXRImage body continued: compute data_width/data_height with explicit
// guards against the max_x - min_x == INT_MAX overflow case (issue 63) and
// the same 8M-per-axis size threshold used in DecodeChunk.
TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_x - exr_header->data_window.min_x == std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if (data_width > threshold) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > threshold) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute.
// Block count comes from the chunkCount attribute when present, otherwise
// from the ceil-divided tile grid (tile sizes validated against the data
// window) or scanline-block count. Each 8-byte chunk offset is then read from
// the offset table, byte-swapped, and bounds-checked against the data size
// (issue #81).
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } if (exr_header->tile_size_x > data_width || exr_header->tile_size_x < 1 || exr_header->tile_size_y > data_height || exr_header->tile_size_y < 1) { tinyexr::SetErrorMessage("tile sizes are invalid.", err); return TINYEXR_ERROR_INVALID_DATA; } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets."
// Any zero offset triggers a single ReconstructLineOffsets pass (mirroring
// OpenEXR's readLineOffsets recovery); DecodeChunk is then invoked and the
// partially-built image is freed via FreeEXRImage on failure. Then
// GetLayers(): collects the unique "layer" prefixes (text before the last
// '.') across all channel names. LayerChannel pairs a channel index with its
// in-layer name. The tail of this line opens ChannelsInLayer(), which strips
// the layer prefix (or matches a requested layer) for each channel; it
// continues on the next line.
<< std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader &exr_header, std::vector<std::string> &layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end()) layer_names.push_back(full_name); } } } struct LayerChannel { explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {} size_t index; std::string name; }; static void ChannelsInLayer(const EXRHeader &exr_header, const std::string layer_name, std::vector<LayerChannel> &channels) { channels.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string ch_name(exr_header.channels[c].name); if (layer_name.empty()) { const size_t pos = ch_name.find_last_of('.'); if (pos != std::string::npos && pos < ch_name.size()) { ch_name = ch_name.substr(pos + 1); } } else { const size_t pos =
// ChannelsInLayer tail (keep only channels whose name starts with
// "<layer>." and strip that prefix), end of namespace tinyexr, then the
// public C API begins. EXRLayers(): parses the version and header from a file
// (rejecting multipart/deep images) and returns a malloc'd array of strdup'd
// layer names — caller frees both. LoadEXR() is a thin wrapper over
// LoadEXRWithLayer() with a NULL layer; LoadEXRWithLayer() opens here with
// argument validation and header/image struct initialization.
ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */ NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret =
// LoadEXRWithLayer body: parse version (rejecting multipart/deep) and header,
// promote all HALF channels to FLOAT via requested_pixel_types, load the
// image, gather the channels of the requested layer (empty string = base
// layer), and resolve the R/G/B/A channel indices from the first four layer
// channels.
ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer( exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only.
// Single-channel (grayscale) path: the lone channel is replicated into all
// four RGBA components, handling both tiled images (per-tile copy with
// out-of-region clipping at the right/bottom edges) and scanline images.
// Multi-channel path begins at the end of this line: R, G and B must all be
// present or the call fails with TINYEXR_ERROR_INVALID_DATA (freeing header
// and image); alpha is optional.
(*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * static_cast<int>(exr_header.tile_size_x) + i; const int jj = exr_image.tiles[it].offset_y * static_cast<int>(exr_header.tile_size_y) + j; const int idx = ii + jj * static_cast<int>(exr_image.width); // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
// LoadEXRWithLayer tail: interleave the R/G/B(/A) channel planes into the
// output RGBA buffer for both tiled and scanline layouts; missing alpha is
// filled with 1.0. Width/height are reported and the header/image are freed
// before returning success. IsEXR() simply checks that the version block
// parses. ParseEXRHeaderFromMemory() opens at the end of this line (argument
// validation; its error message string is split by the corrupted wrapping).
static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument.
// ParseEXRHeaderFromMemory body: skip the version block, parse the header via
// tinyexr::ParseEXRHeader into a HeaderInfo, convert it to the C EXRHeader
// with ConvertHeader, and propagate the version's `tiled` flag. Then
// LoadEXRFromMemory() begins: parse version and header from an in-memory EXR
// before decoding (continues on the next lines).
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT.
// LoadEXRFromMemory body: promote HALF channels to FLOAT, decode the image
// from memory, then locate R/G/B/A channels by exact name match (no layer
// support here, unlike LoadEXRWithLayer). The single-channel case replicates
// the grayscale channel into RGBA for tiled images.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check.
// Grayscale scanline fill, then the RGB(A) path: R/G/B must each exist
// (NOTE(review): per the original @todo comments, exr_image is NOT freed on
// these early error returns — the decoded image leaks), followed by the tiled
// RGBA interleave with edge clipping; alpha defaults to 1.0 when absent.
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check.
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t 
filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(&comp); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(&data[0]); tinyexr::swap4(&data[1]); tinyexr::swap4(&data[2]); tinyexr::swap4(&data[3]); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(&aspectRatio); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(&center[0]); tinyexr::swap4(&center[1]); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(&w); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if 
(exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { std::string e; bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes, &e); if (!ret) { // Use predefined compression parameter. 
zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // TODO(LTE): C++11 thread // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(&f32.f); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == 
TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); 
memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char 
*>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if (memory.size() == 0) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, 
exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. 
{ // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); 
return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(&dx); tinyexr::swap4(&dy); tinyexr::swap4(&dw); tinyexr::swap4(&dh); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(&x); tinyexr::swap4(&y); tinyexr::swap4(&w); tinyexr::swap4(&h); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression 
format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * 
static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char 
*msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { 
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { 
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (err != 0) { // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < 
tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { 
std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); 
// Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
GGanalysis.c
/* GGanalysis.c  Genshin Impact gacha probability toolkit
   GGanalysis by OneBST */
#include "GGanalysis.h"
#include "MyTools.h"

/* Compute the distribution of the pull on which an item is obtained.
   pity_p[i] is the success probability on the i-th pull since the last
   item (1-based, valid for 1..pity_pos; pity_p[pity_pos] is assumed to
   be the hard pity, i.e. 1.0).
   Returns a malloc'd array of size pity_pos+1 where index i holds
   P(item obtained exactly on pull i).  Index 0 is unused and zeroed.
   Ownership transfers to the caller, who must free() it. */
double* calc_distribution(double* pity_p, int pity_pos)
{
    double* ans = malloc((pity_pos+1) * sizeof(double));
    double state = 1;   /* probability of still having obtained nothing */
    int i;
    ans[0] = 0;         /* pull 0 never happens; zero the slot so it is
                           not left uninitialized */
    for(i=1; i<=pity_pos; i++)
    {
        ans[i] = state * pity_p[i];
        state = state * (1.0 - pity_p[i]);
    }
    return ans;
}

/* DP over items that carry a pity counter.
   ans       : output array of size (item_num+1)*(calc_pull+1); slot
               i*(calc_pull+1)+j receives P(the i-th item is obtained
               exactly on pull j)
   item_num  : number of items to obtain
   calc_pull : number of pulls to compute
   pity_p    : per-pull probability table (see calc_distribution)
   pity_pos  : hard-pity pull count
   pull_state: pulls already accumulated toward the first pity counter */
void APICALL pity_item_DP(
    double* ans,
    int item_num,
    int calc_pull,
    double* pity_p,
    int pity_pos,
    int pull_state
)
{
    int i,j,pull;
    /* DP table: M[i][j] = P(i-th item obtained exactly on pull j) */
    double** M;
    double* temp_storage = malloc((item_num+1) * (calc_pull+1) * sizeof(double));
    for(i=0; i<(item_num+1)*(calc_pull+1); i++)
        temp_storage[i] = 0;
    M = build_2D_index(temp_storage, item_num+1, calc_pull+1);
    /* transition distribution: exactly-on-pull probabilities between items */
    double* p_normal = calc_distribution(pity_p, pity_pos);
    /* Precompute the first-item transition, conditioned on `pull_state`
       pulls having already been spent without success. */
    double fix_const = 0;
    for(i=pull_state+1; i<=pity_pos; i++)
        fix_const = fix_const + p_normal[i];
    double* p_first = malloc((pity_pos+1) * sizeof(double));
    for(i=pull_state+1; i<=pity_pos; i++)
        p_first[i-pull_state] = p_normal[i]/fix_const;
    for(i=pity_pos-pull_state+1; i<=pity_pos; i++)
        p_first[i] = 0;
    /* DP base case */
    M[0][0] = 1;
    double* p_trans;    /* exactly-on-this-pull probability table in use */
    int last_pos;       /* pull index the transition starts from */
    /* DP transitions */
    for(i=1; i<=item_num; i++)
    {
        for(j=1; j<=calc_pull; j++)
        {
            for(pull=1; pull<=(pity_pos>j?j:pity_pos); pull++)
            {
                last_pos = j-pull;
                if(last_pos)    /* not starting from pull 0 */
                    p_trans = p_normal;
                else            /* first item: account for existing pity */
                    p_trans = p_first;
                /* the wanted item */
                M[i][j] += M[i-1][last_pos] * p_trans[pull];
            }
        }
    }
    /* Result extraction */
    int temp_pos;
    for(i=0; i<(item_num+1)*(calc_pull+1); i++)
        ans[i] = 0;
    /* Not accumulated: returns P(obtained exactly on that pull). */
    for(i=1; i<=item_num; i++)
        for(j=1; j<=calc_pull; j++)
        {
            temp_pos = i * (calc_pull+1) + j;
            ans[temp_pos] = M[i][j];
        }
    /* Release temporaries (the original leaked all of these). */
    free(p_normal);
    free(p_first);
    free(temp_storage);
    /* NOTE(review): assumes build_2D_index() mallocs the row-pointer
       array separately from temp_storage — confirm against MyTools. */
    free(M);
    return;
}

/* DP for rate-up items (Genshin character banner). */
void APICALL GI_upitem_DP(
    double* ans,        /* output array */
    int item_num,       /* number of items to obtain */
    int calc_pull,      /* number of pulls to compute */
    double* pity_p,     /* per-pull probability table */
    int pity_pos,       /* hard-pity pull count */
    double up_rate,     /* probability the drop is a rate-up item */
    int up_type,        /* number of distinct rate-up items */
    int
pull_state, //垫抽情况 int up_guarantee, //大保底情况 int want_in_stander,//想要的是否在常驻 int up_in_stander, //UP物品在常驻池中的数量 int stander_num //常驻池中物品数量 ) { int i,j,pull; //DP数组声明 double*** M; double* temp_storage = malloc((item_num+1) * (calc_pull+1) * 3 * sizeof(double)); for(i=0; i<(item_num+1)*(calc_pull+1)*3; i++) temp_storage[i] = 0; //M中最后一个维度 0表示抽到想要UP 1表示抽到不想要UP 2表示没抽到UP M = build_3D_index(temp_storage, item_num+1, calc_pull+1, 3); //用于状态转移的概率 从恰好抽到一个物品转移到恰好抽到另一个物品 double* p_normal = calc_distribution(pity_p, pity_pos); //处理垫抽的预处理 //用于处理初始有垫抽数情况的修正常数 double fix_const = 0; for(i=pull_state+1; i<=pity_pos; i++) fix_const = fix_const + p_normal[i]; //修正转移概率 double* p_first = malloc((pity_pos+1) * sizeof(double)); for(i=pull_state+1; i<=pity_pos; i++) p_first[i-pull_state] = p_normal[i]/fix_const; for(i=pity_pos-pull_state+1; i<=pity_pos; i++) p_first[i] = 0; //设置DP初始条件 //若有大保底 if(up_guarantee) M[0][0][2] = 1; //没有大保底 else M[0][0][0] = 1; double* p_trans; //恰好在本抽抽到概率 double up_trans; //多UP选中想要角色的概率 double stander_trans; //常驻池中选取到想要UP概率 double stander_other; //常驻池中选取到其他UP概率 double stander_notup; //常驻池中选取到非UP概率 int last_pos; //开始转移概率位置 up_trans = 1.0/(double)up_type; //处理从常驻中歪的情况 stander_trans = (double)want_in_stander/(double)stander_num; stander_other = (double)(up_in_stander - want_in_stander)/(double)stander_num; stander_notup = (double)(stander_num - up_in_stander)/(double)stander_num; //DP部分 for(i=0; i<=item_num; i++)//从零开始,用于记录先歪再抽的情况 { for(j=1; j<=calc_pull; j++) { for(pull=1; pull<=(pity_pos>j?j:pity_pos); pull++) { last_pos = j-pull; if(last_pos) //非从零开始 p_trans = p_normal; else //从零开始 p_trans = p_first; //想要的UP if(i) { M[i][j][0] += up_rate * M[i-1][last_pos][0] * up_trans * p_trans[pull]; M[i][j][0] += up_rate * M[i-1][last_pos][1] * up_trans * p_trans[pull]; M[i][j][0] += stander_trans * (1.0-up_rate) * M[i-1][last_pos][0] * p_trans[pull]; M[i][j][0] += stander_trans * (1.0-up_rate) * M[i-1][last_pos][1] * p_trans[pull]; M[i][j][0] += M[i-1][last_pos][2] * up_trans 
* p_trans[pull]; } //其他UP if(up_type>1) { M[i][j][1] += up_rate * M[i][last_pos][0] * (1.0-up_trans) * p_trans[pull]; M[i][j][1] += up_rate * M[i][last_pos][1] * (1.0-up_trans) * p_trans[pull]; M[i][j][1] += stander_other * (1.0-up_rate) * M[i][last_pos][0] * p_trans[pull]; M[i][j][1] += stander_other * (1.0-up_rate) * M[i][last_pos][1] * p_trans[pull]; M[i][j][1] += M[i][last_pos][2] * (1.0-up_trans) * p_trans[pull]; } //非UP M[i][j][2] += stander_notup * (1.0-up_rate) * M[i][last_pos][0] * p_trans[pull]; M[i][j][2] += stander_notup * (1.0-up_rate) * M[i][last_pos][1] * p_trans[pull]; } } } /*结果提取部分*/ int temp_pos; for(i=0; i<(item_num+1)*(calc_pull+1); i++) ans[i] = 0; //梯形累加 /* for(i=1; i<=item_num; i++) for(j=1; j<=calc_pull; j++) { temp_pos = i * (calc_pull+1) + j; ans[temp_pos] = ans[temp_pos-1] + M[i][j][0]; } */ //不累加,返回恰好在某抽抽到的概率 for(i=1; i<=item_num; i++) for(j=1; j<=calc_pull; j++) { temp_pos = i * (calc_pull+1) + j; ans[temp_pos] = M[i][j][0]; } return; } /*武器池神铸定轨DP*/ void APICALL GI_weapon_EP_DP( double* ans, //DP结果存放数组 int item_num, //计算物品抽取数量 int calc_pull, //计算抽数 double* pity_p, //概率提升表 int pity_pos, //保底抽数 double up_rate, //UP概率 int up_type, //UP物品种类 int pull_state, //垫抽情况 int up_guarantee, //大保底情况 int want_in_stander,//想要的是否在常驻 int up_in_stander, //UP物品在常驻池中的数量 int stander_num //常驻池中物品数量 ) { int i,j,pull; //DP数组声明 double*** M; double* temp_storage = malloc((item_num+1) * (calc_pull+1) * 4 * sizeof(double)); for(i=0; i<(item_num+1)*(calc_pull+1)*4; i++) temp_storage[i] = 0; M = build_3D_index(temp_storage, item_num+1, calc_pull+1, 4); //用于状态转移的概率 从恰好抽到一个物品转移到恰好抽到另一个物品 double* p_normal = calc_distribution(pity_p, pity_pos); //处理垫抽的预处理 //用于处理初始有垫抽数情况的修正常数 double fix_const = 0; for(i=pull_state+1; i<=pity_pos; i++) fix_const = fix_const + p_normal[i]; //修正转移概率 double* p_first = malloc((pity_pos+1) * sizeof(double)); for(i=pull_state+1; i<=pity_pos; i++) p_first[i-pull_state] = p_normal[i]/fix_const; for(i=pity_pos-pull_state+1; i<=pity_pos; i++) 
p_first[i] = 0; //设置DP初始条件 //up_guarantee 中数字表示初始状态 M[0][0][up_guarantee] = 1; double* p_trans; //恰好在本抽抽到概率 double up_trans; //多UP选中想要角色的概率 double stander_trans; //常驻池中选取到想要UP概率 double stander_other; //常驻池中选取到其他UP概率 double stander_notup; //常驻池中选取到非UP概率 int last_pos; //开始转移概率位置 up_trans = 1.0/(double)up_type; //处理从常驻中歪的情况 stander_trans = (double)want_in_stander/(double)stander_num; stander_other = (double)(up_in_stander - want_in_stander)/(double)stander_num; stander_notup = (double)(stander_num - up_in_stander)/(double)stander_num; //DP部分 for(i=0; i<=item_num; i++)//从零开始,用于记录先歪再抽的情况 { for(j=1; j<=calc_pull; j++) { for(pull=1; pull<=(pity_pos>j?j:pity_pos); pull++) { last_pos = j-pull; if(last_pos) //非从零开始 p_trans = p_normal; else //从零开始 p_trans = p_first; /* M中维度说明 0 表示抽到想要的UP 命定值为0 1 表示抽到不想要UP 命定值为1 2 表示抽到了常驻 命定值为1 3 表示 命定值为2 */ //想要的UP if(i) { M[i][j][0] += up_rate * M[i-1][last_pos][0] * up_trans * p_trans[pull]; M[i][j][0] += up_rate * M[i-1][last_pos][1] * up_trans * p_trans[pull]; M[i][j][0] += M[i-1][last_pos][3] * p_trans[pull]; M[i][j][0] += stander_trans * (1.0-up_rate) * M[i-1][last_pos][0] * p_trans[pull]; M[i][j][0] += stander_trans * (1.0-up_rate) * M[i-1][last_pos][1] * p_trans[pull]; M[i][j][0] += M[i-1][last_pos][2] * up_trans * p_trans[pull]; } //抽到不想要UP 命定值为1 M[i][j][1] += up_rate * M[i][last_pos][0] * (1.0-up_trans) * p_trans[pull]; M[i][j][1] += stander_other * (1.0-up_rate) * M[i][last_pos][0] * p_trans[pull]; //抽到了常驻 命定值为1 M[i][j][2] += stander_notup * (1.0-up_rate) * M[i][last_pos][0] * p_trans[pull]; // 命定值为2 M[i][j][3] += up_rate * M[i][last_pos][1] * (1.0-up_trans) * p_trans[pull]; M[i][j][3] += stander_other * (1.0-up_rate) * M[i][last_pos][1] * p_trans[pull]; M[i][j][3] += stander_notup * (1.0-up_rate) * M[i][last_pos][1] * p_trans[pull]; M[i][j][3] += M[i][last_pos][2] * (1.0-up_trans) * p_trans[pull]; } } } /*结果提取部分*/ int temp_pos; for(i=0; i<(item_num+1)*(calc_pull+1); i++) ans[i] = 0; //返回恰好在某抽抽到的概率 for(i=1; i<=item_num; i++) 
for(j=1; j<=calc_pull; j++) { temp_pos = i * (calc_pull+1) + j; ans[temp_pos] = M[i][j][0]; } return; } /*常驻池DP*/ void APICALL GI_stander_DP( double* ans, //DP结果存放数组 int item_num, //计算物品抽取数量 int calc_pull, //计算抽数 double* pity_p, //概率提升表 int pity_pos, //保底抽数 double* hit_p, //类别概率表 int hit_pos, //类别保底 int pull_state, //垫抽情况 int type_state, //多少抽没有异类物品 int last_type, //上次五星种类 int stander_num, //常驻池中本类物品数量 int collect_all //是否计算收集齐全概率 ) { int i,j,k,pull; //DP数组声明 double ***M, ***O; double* temp_storage_M = malloc((item_num+1) * (calc_pull+1) * (hit_pos+1) * sizeof(double)); double* temp_storage_O = malloc((item_num+1) * (calc_pull+1) * (hit_pos+1) * sizeof(double)); for(i=0; i<(item_num+1)*(calc_pull+1)*(hit_pos+1); i++) { temp_storage_M[i] = 0; temp_storage_O[i] = 0; } M = build_3D_index(temp_storage_M, item_num+1, calc_pull+1, hit_pos+1); O = build_3D_index(temp_storage_O, item_num+1, calc_pull+1, hit_pos+1); //用于状态转移的概率 从恰好抽到一个物品转移到恰好抽到另一个物品 double* p_normal = calc_distribution(pity_p, pity_pos); //处理垫抽的预处理 //用于处理初始有垫抽数情况的修正常数 double fix_const = 0; for(i=pull_state+1; i<=pity_pos; i++) fix_const = fix_const + p_normal[i]; //修正转移概率 double* p_first = malloc((pity_pos+1) * sizeof(double)); for(i=pull_state+1; i<=pity_pos; i++) p_first[i-pull_state] = p_normal[i]/fix_const; for(i=pity_pos-pull_state+1; i<=pity_pos; i++) p_first[i] = 0; //设置DP初始条件 if(last_type == 0) //上个物品是本类 M[0][0][type_state] = 1; else //上个物品是异类 O[0][0][type_state] = 1; int last_pos; //开始转移概率位置 int fix_pull; //修正非0开始的平稳参数 double p_trans; //恰好在本抽抽到概率 double p_get; //常驻本类所有物品中得到想要物品的概率 double p_not_get; //算法需要值 double p_hit; //异类转移到本类别的概率 /*DP部分*/ for(i=0; i<=item_num; i++) //从零开始,用于记录先歪再抽的情况 { //设定转移到想要物品概率 p_get = 1.0/(double)stander_num; p_not_get = (double)(stander_num-1)/(double)stander_num; if(collect_all) //如果是考虑收集齐全的情况 { if(i) { p_get = (double)(stander_num-i+1)/(double)stander_num; p_not_get = (double)(i)/(double)stander_num; } else { p_get = 1.0; p_not_get = 0.0; } } for(j=1; j<=calc_pull; 
j++) //计算抽到j抽时候的情况 { /* //编译加入并行优化 #ifdef OPENMP_PARALLEL #pragma omp parallel for #endif */ for(k=0; k<=hit_pos; k++) //枚举另一类物品已经多久没抽到了 { for(pull=1; pull<=(pity_pos>j?j:pity_pos); pull++) { //等级保底 last_pos = j-pull; //上个物品位置 fix_pull = pull; if(last_pos) //非从零开始 p_trans = p_normal[pull]; else //从零开始 { p_trans = p_first[pull]; fix_pull = pull + pull_state; } //类别保底 以下参数用于不同类别转移 if(pull+k > hit_pos) p_hit = 1; else p_hit = hit_p[pull+k]; //状态转移 //本类到本类状态转移 if(k-pull >= 0) { //抽到了 if(i) M[i][j][k] += M[i-1][last_pos][k-pull] * p_trans * (1.0-hit_p[k]) * p_get; //没抽到 M[i][j][k] += M[i][last_pos][k-pull] * p_trans * (1.0-hit_p[k]) * p_not_get; } //异类到本类的转移 if(i) M[i][j][fix_pull] += O[i-1][last_pos][k] * p_trans * p_hit * p_get; M[i][j][fix_pull] += O[i][last_pos][k] * p_trans * p_hit * p_not_get; //异类到异类的转移 if(k-pull >= 0) O[i][j][k] += O[i][last_pos][k-pull] * p_trans * (1.0-hit_p[k]); //本类到异类的转移 O[i][j][fix_pull] += M[i][last_pos][k] * p_trans * p_hit; } } } } /*结果提取部分*/ int temp_pos; for(i=0; i<(item_num+1)*(calc_pull+1); i++) ans[i] = 0; //返回恰好在某抽抽到的概率 for(i=1; i<=item_num; i++) //抽到了i个物品 { //设定转移到想要物品概率 p_get = 1.0/(double)stander_num; if(collect_all) //如果是考虑收集齐全的情况 { if(i) p_get = (double)(stander_num-i+1)/(double)stander_num; else continue; } for(j=1; j<=calc_pull; j++) //计算抽到j抽时候的情况 { /* //编译加入并行优化 #ifdef OPENMP_PARALLEL #pragma omp parallel for #endif */ for(k=0; k<=hit_pos; k++) //枚举另一类物品已经多久没抽到了 { for(pull=1; pull<=(pity_pos>j?j:pity_pos); pull++) { //等级保底 last_pos = j-pull; //上个物品位置 if(last_pos) //非从零开始 p_trans = p_normal[pull]; else //从零开始 p_trans = p_first[pull]; //跨类别保底 if(pull+k > hit_pos) p_hit = 1; else p_hit = hit_p[pull+k]; //提取结果 temp_pos = i * (calc_pull+1) + j; if(k-pull >= 0) ans[temp_pos] += M[i-1][last_pos][k-pull] * p_trans * (1.0-hit_p[k]) * p_get; ans[temp_pos] += O[i-1][last_pos][k] * p_trans * p_hit * p_get; } } } } return; }
a.c
#include <stdio.h>
#include <omp.h>

/* Demonstrates the OpenMP `single` construct: although a whole team of
 * threads is active inside the parallel region, exactly one thread runs
 * the `single` block, so the words are printed once and in order. */
int main(void)
{
    static const char *words[] = { "one ", "two ", "three " };

#pragma omp parallel
    {
#pragma omp single
        {
            for (int i = 0; i < 3; i++) {
                printf("%s", words[i]);
            }
        }
    }

    printf("\n");
    return 0;
}
mapOptmization.h
#ifndef MAPOPTMIZATION_H #define MAPOPTMIZATION_H #include "utility.h" #include "feature_matching/cloud_info.h" #include "tic_toc.hpp" #include <gtsam/geometry/Rot3.h> #include <gtsam/geometry/Pose3.h> #include <gtsam/slam/PriorFactor.h> #include <gtsam/slam/BetweenFactor.h> #include <gtsam/navigation/GPSFactor.h> #include <gtsam/navigation/ImuFactor.h> #include <gtsam/navigation/CombinedImuFactor.h> #include <gtsam/nonlinear/NonlinearFactorGraph.h> #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> #include <gtsam/nonlinear/Marginals.h> #include <gtsam/nonlinear/Values.h> #include <gtsam/inference/Symbol.h> #include <gtsam/nonlinear/ISAM2.h> using namespace gtsam; using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) using symbol_shorthand::V; // Vel (xdot,ydot,zdot) using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) using symbol_shorthand::G; // GPS pose /* * A point cloud type that has 6D pose info ([x,y,z,roll,pitch,yaw] intensity is time stamp) */ struct PointXYZIRPYT { PCL_ADD_POINT4D PCL_ADD_INTENSITY; // preferred way of adding a XYZ+padding float roll; float pitch; float yaw; double time; EIGEN_MAKE_ALIGNED_OPERATOR_NEW // make sure our new allocators are aligned } EIGEN_ALIGN16; // enforce SSE padding for correct memory alignment POINT_CLOUD_REGISTER_POINT_STRUCT (PointXYZIRPYT, (float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity) (float, roll, roll) (float, pitch, pitch) (float, yaw, yaw) (double, time, time)) typedef PointXYZIRPYT PointTypePose; class mapOptimization : public ParamServer { public: // gtsam NonlinearFactorGraph gtSAMgraph; Values initialEstimate; Values optimizedEstimate; ISAM2 *isam; Values isamCurrentEstimate; Eigen::MatrixXd poseCovariance; ros::Publisher pubLaserCloudSurround; ros::Publisher pubOdomAftMappedROS; ros::Publisher pubKeyPoses; ros::Publisher pubPath; ros::Publisher pubHistoryKeyFrames; ros::Publisher pubIcpKeyFrames; ros::Publisher pubRecentKeyFrames; ros::Publisher pubRecentKeyFrame; 
ros::Publisher pubCloudRegisteredRaw; ros::Subscriber subLaserCloudInfo; ros::Subscriber subGPS; std::deque<nav_msgs::Odometry> gpsQueue; feature_matching::cloud_info cloudInfo; vector<pcl::PointCloud<PointType>::Ptr> cornerCloudKeyFrames; vector<pcl::PointCloud<PointType>::Ptr> surfCloudKeyFrames; pcl::PointCloud<PointType>::Ptr cloudKeyPoses3D; // 关键帧位姿节点? pcl::PointCloud<PointTypePose>::Ptr cloudKeyPoses6D; pcl::PointCloud<PointType>::Ptr laserCloudCornerLast; // corner feature set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudSurfLast; // surf feature set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudCornerLastDS; // downsampled corner featuer set from odoOptimization 降采样后的边缘点特征 pcl::PointCloud<PointType>::Ptr laserCloudSurfLastDS; // downsampled surf featuer set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudOri; // 特征点(边缘点+平面点) pcl::PointCloud<PointType>::Ptr coeffSel; // 与laserCloudOri 对应的系数 std::vector<PointType> laserCloudOriCornerVec; // corner point holder for parallel computation std::vector<PointType> coeffSelCornerVec; std::vector<bool> laserCloudOriCornerFlag; std::vector<PointType> laserCloudOriSurfVec; // surf point holder for parallel computation std::vector<PointType> coeffSelSurfVec; std::vector<bool> laserCloudOriSurfFlag; pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMap; pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMap; pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMapDS; // 世界坐标系下的 局部边缘线特征地图 pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMapDS; pcl::PointCloud<PointType>::Ptr corner_GlobalMap; pcl::PointCloud<PointType>::Ptr surf_GlobalMap; pcl::KdTreeFLANN<PointType>::Ptr kdtreeCornerFromMap; pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurfFromMap; pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurroundingKeyPoses; pcl::KdTreeFLANN<PointType>::Ptr kdtreeHistoryKeyPoses; pcl::PointCloud<PointType>::Ptr latestKeyFrameCloud; // [闭环用]最新帧的特征点点云(已经变换到世界坐标系的) pcl::PointCloud<PointType>::Ptr 
nearHistoryKeyFrameCloud; // [闭环用]闭环关键帧附近合并形成的局部地图(已经变换到世界坐标系的) pcl::VoxelGrid<PointType> downSizeFilterCorner; pcl::VoxelGrid<PointType> downSizeFilterSurf; pcl::VoxelGrid<PointType> downSizeFilterICP; pcl::VoxelGrid<PointType> downSizeFilterSurroundingKeyPoses; // for surrounding key poses of scan-to-map optimization // 点云回调,取点云时间戳 ros::Time timeLaserInfoStamp; double timeLaserCloudInfoLast; float transformTobeMapped[6]; //(roll,pitch,yaw,x,y,z) 相机位姿 std::mutex mtx; double timeLastProcessing = -1; bool isDegenerate = false; Eigen::Matrix<float, 6, 6> matP; int laserCloudCornerFromMapDSNum = 0; int laserCloudSurfFromMapDSNum = 0; int laserCloudCornerLastDSNum = 0; //当前帧点云 边缘点特征 int laserCloudSurfLastDSNum = 0; bool aLoopIsClosed = false; int imuPreintegrationResetId = 0; nav_msgs::Path globalPath; Eigen::Affine3f transPointAssociateToMap; //当前帧点云对应的预积分的位姿估计 // 构造函数 mapOptimization() { // ISAM2 ISAM2Params parameters; parameters.relinearizeThreshold = 0.1; parameters.relinearizeSkip = 1; isam = new ISAM2(parameters); // 发布 // 关键帧点云 pubKeyPoses = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/trajectory", 1); // 局部点云地图 pubLaserCloudSurround = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/map_global", 1); // 匹配? pubOdomAftMappedROS = nh.advertise<nav_msgs::Odometry> ("lio_sam_custom/mapping/odometry", 1); // path pubPath = nh.advertise<nav_msgs::Path>("lio_sam_custom/mapping/path", 1); // 订阅 // 1. 提取特征的点云以及IMU积分估计位姿 subLaserCloudInfo = nh.subscribe<feature_matching::cloud_info>("lio_sam_custom/feature/cloud_info", 10, &mapOptimization::laserCloudInfoHandler, this, ros::TransportHints().tcpNoDelay()); // 2. 
GPS //subGPS = nh.subscribe<nav_msgs::Odometry> (gpsTopic, 200, &mapOptimization::gpsHandler, this, ros::TransportHints().tcpNoDelay()); // 发布 // 历史关键帧 pubHistoryKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/icp_loop_closure_history_cloud", 1); // ICP回环点云 pubIcpKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/icp_loop_closure_corrected_cloud", 1); // 最近几帧 pubRecentKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/map_local", 1); // 当前帧配准完成后的点云 pubRecentKeyFrame = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/cloud_registered", 1); pubCloudRegisteredRaw = nh.advertise<sensor_msgs::PointCloud2>("lio_sam_custom/mapping/cloud_registered_raw", 1); // 降采样参数 downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize); downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize); downSizeFilterICP.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize); downSizeFilterSurroundingKeyPoses.setLeafSize(surroundingKeyframeDensity, surroundingKeyframeDensity, surroundingKeyframeDensity); // for surrounding key poses of scan-to-map optimization allocateMemory(); } void allocateMemory() { cloudKeyPoses3D.reset(new pcl::PointCloud<PointType>()); cloudKeyPoses6D.reset(new pcl::PointCloud<PointTypePose>()); kdtreeSurroundingKeyPoses.reset(new pcl::KdTreeFLANN<PointType>()); kdtreeHistoryKeyPoses.reset(new pcl::KdTreeFLANN<PointType>()); corner_GlobalMap.reset(new pcl::PointCloud<PointType>()); surf_GlobalMap.reset(new pcl::PointCloud<PointType>()); laserCloudCornerLast.reset(new pcl::PointCloud<PointType>()); // corner feature set from odoOptimization laserCloudSurfLast.reset(new pcl::PointCloud<PointType>()); // surf feature set from odoOptimization laserCloudCornerLastDS.reset(new pcl::PointCloud<PointType>()); // downsampled corner featuer set from odoOptimization 
laserCloudSurfLastDS.reset(new pcl::PointCloud<PointType>()); // downsampled surf featuer set from odoOptimization laserCloudOri.reset(new pcl::PointCloud<PointType>()); coeffSel.reset(new pcl::PointCloud<PointType>()); laserCloudOriCornerVec.resize(N_SCAN * Horizon_SCAN); coeffSelCornerVec.resize(N_SCAN * Horizon_SCAN); laserCloudOriCornerFlag.resize(N_SCAN * Horizon_SCAN); laserCloudOriSurfVec.resize(N_SCAN * Horizon_SCAN); coeffSelSurfVec.resize(N_SCAN * Horizon_SCAN); laserCloudOriSurfFlag.resize(N_SCAN * Horizon_SCAN); std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false); std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false); laserCloudCornerFromMap.reset(new pcl::PointCloud<PointType>()); laserCloudSurfFromMap.reset(new pcl::PointCloud<PointType>()); laserCloudCornerFromMapDS.reset(new pcl::PointCloud<PointType>()); laserCloudSurfFromMapDS.reset(new pcl::PointCloud<PointType>()); kdtreeCornerFromMap.reset(new pcl::KdTreeFLANN<PointType>()); kdtreeSurfFromMap.reset(new pcl::KdTreeFLANN<PointType>()); latestKeyFrameCloud.reset(new pcl::PointCloud<PointType>()); nearHistoryKeyFrameCloud.reset(new pcl::PointCloud<PointType>()); for (int i = 0; i < 6; ++i){ transformTobeMapped[i] = 0; } matP.setZero(); { // 读取地图文件 pcl::io::loadPCDFile(std::getenv("HOME")+savePCDDirectory + "cloudCorner.pcd", *corner_GlobalMap); pcl::io::loadPCDFile(std::getenv("HOME")+savePCDDirectory + "cloudSurf.pcd", *surf_GlobalMap); /// 降采样 // Downsample the surrounding corner key frames (or map) downSizeFilterCorner.setInputCloud(corner_GlobalMap); downSizeFilterCorner.filter(*corner_GlobalMap); //laserCloudCornerFromMapDS = corner_GlobalMap; //laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size(); // Downsample the surrounding surf key frames (or map) downSizeFilterSurf.setInputCloud(surf_GlobalMap); downSizeFilterSurf.filter(*surf_GlobalMap); //laserCloudSurfFromMapDS = surf_GlobalMap; //laserCloudSurfFromMapDSNum = 
laserCloudSurfFromMapDS->size(); } } void registration(const feature_matching::cloud_info& cloud_info_, Eigen::Affine3f& pose_guess_){ // extract time stamp // 取时间戳 timeLaserInfoStamp = cloud_info_.header.stamp; timeLaserCloudInfoLast = cloud_info_.header.stamp.toSec(); // extract info and feature cloud // 分别取角点、平面点 cloudInfo = cloud_info_; pcl::fromROSMsg(cloud_info_.cloud_corner, *laserCloudCornerLast); pcl::fromROSMsg(cloud_info_.cloud_surface, *laserCloudSurfLast); // 线程锁 std::lock_guard<std::mutex> lock(mtx); // 如果当前帧点云时间戳 - 上一次处理点云的时间戳 > 设定间隔 if (timeLaserCloudInfoLast - timeLastProcessing >= mappingProcessInterval) { timeLastProcessing = timeLaserCloudInfoLast; std::cout<<"InPut: "<<pose_guess_.translation().transpose()<<std::endl; std::vector<float> origin = {pose_guess_.translation().x(), pose_guess_.translation().y(), pose_guess_.translation().z()}; std::vector<float> edge; std::vector<float> size ={-30.0, 30.0, -30.0, 30.0, -10.0, 10.0}; // 取局部地图 pcl::CropBox<PointType> pcl_box_filter_; for (size_t i = 0; i < origin.size(); ++i) { edge.emplace_back(size.at(2 * i) + origin.at(i)); edge.emplace_back(size.at(2 * i + 1) + origin.at(i)); } // 根据网格划分边缘,从全地图中裁剪,得到局部地图 laserCloudCornerFromMapDS->clear(); pcl_box_filter_.setMin(Eigen::Vector4f(edge.at(0), edge.at(2), edge.at(4), 1.0e-6)); pcl_box_filter_.setMax(Eigen::Vector4f(edge.at(1), edge.at(3), edge.at(5), 1.0e6)); // 然后再次滤波 pcl_box_filter_.setInputCloud(corner_GlobalMap); pcl_box_filter_.filter(*laserCloudCornerFromMapDS); laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size(); laserCloudSurfFromMapDS->clear(); pcl_box_filter_.setInputCloud(surf_GlobalMap); pcl_box_filter_.filter(*laserCloudSurfFromMapDS); laserCloudSurfFromMapDSNum = laserCloudSurfFromMapDS->size(); //更新初始化位姿估计 transformTobeMapped //updateInitialGuess(); // 分解transFinal,储存到transformTobeMapped pcl::getTranslationAndEulerAngles(pose_guess_, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5], 
transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); //对当前帧点云的边缘点、平面点集合进行降采样 downsampleCurrentScan(); TicToc tic_; //根据边缘点特征、平面点特征,进行匹配,然后LM优化 Lidar位姿 scan2MapOptimization(); std::cout<<"LM 耗时: "<<tic_.toc()<<std::endl; //判断是否关键帧,然后加入因子,因子图优化 //saveKeyFramesAndFactor(); //如果有回环,则更新全局位姿 //correctPoses(); pose_guess_=pcl::getTransformation(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); std::cout<<"OutPut: "<<pose_guess_.translation().transpose()<<std::endl; // // 匹配之后判断是否需要更新局部地图 // for (int i = 0; i < 3; i++) { // if (fabs(cloud_pose(i, 3) - edge.at(2 * i)) > 50.0 && // fabs(cloud_pose(i, 3) - edge.at(2 * i + 1)) > 50.0) // continue; // ResetLocalMap(cloud_pose(0,3), cloud_pose(1,3), cloud_pose(2,3)); // break; // } publishOdometry(); publishFrames(); } } // 点云回调 void laserCloudInfoHandler(const feature_matching::cloud_infoConstPtr& msgIn) { // extract time stamp // 取时间戳 timeLaserInfoStamp = msgIn->header.stamp; timeLaserCloudInfoLast = msgIn->header.stamp.toSec(); // extract info and feature cloud // 分别取角点、平面点 cloudInfo = *msgIn; pcl::fromROSMsg(msgIn->cloud_corner, *laserCloudCornerLast); pcl::fromROSMsg(msgIn->cloud_surface, *laserCloudSurfLast); // 线程锁 std::lock_guard<std::mutex> lock(mtx); // 如果当前帧点云时间戳 - 上一次处理点云的时间戳 > 设定间隔 if (timeLaserCloudInfoLast - timeLastProcessing >= mappingProcessInterval) { timeLastProcessing = timeLaserCloudInfoLast; //更新初始化位姿估计 transformTobeMapped updateInitialGuess(); //临近关键帧点云合并得到局部边缘点、平面点特征地图 extractSurroundingKeyFrames(); //对当前帧点云的边缘点、平面点集合进行降采样 downsampleCurrentScan(); //根据边缘点特征、平面点特征,进行匹配,然后LM优化 Lidar位姿 scan2MapOptimization(); //判断是否关键帧,然后加入因子,因子图优化 saveKeyFramesAndFactor(); //如果有回环,则更新全局位姿 correctPoses(); publishOdometry(); publishFrames(); } } void gpsHandler(const nav_msgs::Odometry::ConstPtr& gpsMsg) { gpsQueue.push_back(*gpsMsg); } // 将激光雷达坐标系的点转换到地图坐标系 void pointAssociateToMap(PointType const * const pi, 
PointType * const po) { po->x = transPointAssociateToMap(0,0) * pi->x + transPointAssociateToMap(0,1) * pi->y + transPointAssociateToMap(0,2) * pi->z + transPointAssociateToMap(0,3); po->y = transPointAssociateToMap(1,0) * pi->x + transPointAssociateToMap(1,1) * pi->y + transPointAssociateToMap(1,2) * pi->z + transPointAssociateToMap(1,3); po->z = transPointAssociateToMap(2,0) * pi->x + transPointAssociateToMap(2,1) * pi->y + transPointAssociateToMap(2,2) * pi->z + transPointAssociateToMap(2,3); po->intensity = pi->intensity; } pcl::PointCloud<PointType>::Ptr transformPointCloud(pcl::PointCloud<PointType>::Ptr cloudIn, PointTypePose* transformIn) { pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>()); PointType *pointFrom; int cloudSize = cloudIn->size(); cloudOut->resize(cloudSize); Eigen::Affine3f transCur = pcl::getTransformation(transformIn->x, transformIn->y, transformIn->z, transformIn->roll, transformIn->pitch, transformIn->yaw); for (int i = 0; i < cloudSize; ++i){ pointFrom = &cloudIn->points[i]; cloudOut->points[i].x = transCur(0,0) * pointFrom->x + transCur(0,1) * pointFrom->y + transCur(0,2) * pointFrom->z + transCur(0,3); cloudOut->points[i].y = transCur(1,0) * pointFrom->x + transCur(1,1) * pointFrom->y + transCur(1,2) * pointFrom->z + transCur(1,3); cloudOut->points[i].z = transCur(2,0) * pointFrom->x + transCur(2,1) * pointFrom->y + transCur(2,2) * pointFrom->z + transCur(2,3); cloudOut->points[i].intensity = pointFrom->intensity; } return cloudOut; } gtsam::Pose3 pclPointTogtsamPose3(PointTypePose thisPoint) { return gtsam::Pose3(gtsam::Rot3::RzRyRx(double(thisPoint.roll), double(thisPoint.pitch), double(thisPoint.yaw)), gtsam::Point3(double(thisPoint.x), double(thisPoint.y), double(thisPoint.z))); } gtsam::Pose3 trans2gtsamPose(float transformIn[]) { return gtsam::Pose3(gtsam::Rot3::RzRyRx(transformIn[0], transformIn[1], transformIn[2]), gtsam::Point3(transformIn[3], transformIn[4], transformIn[5])); } Eigen::Affine3f 
pclPointToAffine3f(PointTypePose thisPoint) { return pcl::getTransformation(thisPoint.x, thisPoint.y, thisPoint.z, thisPoint.roll, thisPoint.pitch, thisPoint.yaw); } Eigen::Affine3f trans2Affine3f(float transformIn[]) { // (x,y,z,roll,pitch,yaw) return pcl::getTransformation(transformIn[3], transformIn[4], transformIn[5], transformIn[0], transformIn[1], transformIn[2]); } PointTypePose trans2PointTypePose(float transformIn[]) { PointTypePose thisPose6D; thisPose6D.x = transformIn[3]; thisPose6D.y = transformIn[4]; thisPose6D.z = transformIn[5]; thisPose6D.roll = transformIn[0]; thisPose6D.pitch = transformIn[1]; thisPose6D.yaw = transformIn[2]; return thisPose6D; } void visualizeGlobalMapThread() { ros::Rate rate(0.2); while (ros::ok()){ rate.sleep(); publishGlobalMap(); } if (savePCD == false) return; cout << "****************************************************" << endl; cout << "Saving map to pcd files ..." << endl; // create directory and remove old files; savePCDDirectory = std::getenv("HOME") + savePCDDirectory; int unused = system((std::string("exec rm -r ") + savePCDDirectory).c_str()); unused = system((std::string("mkdir ") + savePCDDirectory).c_str()); // save key frame transformations pcl::io::savePCDFileASCII(savePCDDirectory + "trajectory.pcd", *cloudKeyPoses3D); pcl::io::savePCDFileASCII(savePCDDirectory + "transformations.pcd", *cloudKeyPoses6D); // extract global point cloud map pcl::PointCloud<PointType>::Ptr globalCornerCloud(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalCornerCloudDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalSurfCloud(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalSurfCloudDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapCloud(new pcl::PointCloud<PointType>()); for (int i = 0; i < (int)cloudKeyPoses3D->size(); i++) { *globalCornerCloud += *transformPointCloud(cornerCloudKeyFrames[i], &cloudKeyPoses6D->points[i]); 
*globalSurfCloud += *transformPointCloud(surfCloudKeyFrames[i], &cloudKeyPoses6D->points[i]); cout << "\r" << std::flush << "Processing feature cloud " << i << " of " << cloudKeyPoses6D->size() << " ..."; } // down-sample and save corner cloud downSizeFilterCorner.setInputCloud(globalCornerCloud); downSizeFilterCorner.filter(*globalCornerCloudDS); pcl::io::savePCDFileASCII(savePCDDirectory + "cloudCorner.pcd", *globalCornerCloudDS); // down-sample and save surf cloud downSizeFilterSurf.setInputCloud(globalSurfCloud); downSizeFilterSurf.filter(*globalSurfCloudDS); pcl::io::savePCDFileASCII(savePCDDirectory + "cloudSurf.pcd", *globalSurfCloudDS); // down-sample and save global point cloud map *globalMapCloud += *globalCornerCloud; *globalMapCloud += *globalSurfCloud; pcl::io::savePCDFileASCII(savePCDDirectory + "cloudGlobal.pcd", *globalMapCloud); cout << "****************************************************" << endl; cout << "Saving map to pcd files completed" << endl; } void publishGlobalMap() { if (pubLaserCloudSurround.getNumSubscribers() == 0) return; if (cloudKeyPoses3D->points.empty() == true) return; pcl::KdTreeFLANN<PointType>::Ptr kdtreeGlobalMap(new pcl::KdTreeFLANN<PointType>());; pcl::PointCloud<PointType>::Ptr globalMapKeyPoses(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyPosesDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyFrames(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyFramesDS(new pcl::PointCloud<PointType>()); // kd-tree to find near key frames to visualize std::vector<int> pointSearchIndGlobalMap; std::vector<float> pointSearchSqDisGlobalMap; // search near key frames to visualize mtx.lock(); kdtreeGlobalMap->setInputCloud(cloudKeyPoses3D); kdtreeGlobalMap->radiusSearch(cloudKeyPoses3D->back(), globalMapVisualizationSearchRadius, pointSearchIndGlobalMap, pointSearchSqDisGlobalMap, 0); mtx.unlock(); for (int i = 0; i < 
(int)pointSearchIndGlobalMap.size(); ++i) globalMapKeyPoses->push_back(cloudKeyPoses3D->points[pointSearchIndGlobalMap[i]]); // downsample near selected key frames pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyPoses; // for global map visualization downSizeFilterGlobalMapKeyPoses.setLeafSize(globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity); // for global map visualization downSizeFilterGlobalMapKeyPoses.setInputCloud(globalMapKeyPoses); downSizeFilterGlobalMapKeyPoses.filter(*globalMapKeyPosesDS); // extract visualized and downsampled key frames for (int i = 0; i < (int)globalMapKeyPosesDS->size(); ++i){ if (pointDistance(globalMapKeyPosesDS->points[i], cloudKeyPoses3D->back()) > globalMapVisualizationSearchRadius) continue; int thisKeyInd = (int)globalMapKeyPosesDS->points[i].intensity; *globalMapKeyFrames += *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); *globalMapKeyFrames += *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); } // downsample visualized points pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyFrames; // for global map visualization downSizeFilterGlobalMapKeyFrames.setLeafSize(globalMapVisualizationLeafSize, globalMapVisualizationLeafSize, globalMapVisualizationLeafSize); // for global map visualization downSizeFilterGlobalMapKeyFrames.setInputCloud(globalMapKeyFrames); downSizeFilterGlobalMapKeyFrames.filter(*globalMapKeyFramesDS); publishCloud(&pubLaserCloudSurround, surf_GlobalMap/*globalMapKeyFramesDS*/, timeLaserInfoStamp, "odom"); } // 回环线程 void loopClosureThread() { if (loopClosureEnableFlag == false) return; ros::Rate rate(0.2); while (ros::ok()) { rate.sleep(); performLoopClosure(); } } /** * @brief detectLoopClosure 获取回环候选关键帧 * 1. 位姿节点构建成kd-tree * 2. 根据最新关键帧的位置,在kd-tree中查找给定范围内的节点,取时间上最老的节点作为闭环候选 * 3. 将最新关键帧的特征点点云变换到世界坐标系下,储存到latestKeyFrameCloud * 4. 跟据闭环候选关键帧,取前后几帧,合并成局部地图 * 5. 
输出两个id * @param latestID [输出]最新关键帧id * @param closestID [输出]闭环候选关键帧id * @return */ bool detectLoopClosure(int *latestID, int *closestID) { int latestFrameIDLoopCloure; int closestHistoryFrameID; latestKeyFrameCloud->clear(); nearHistoryKeyFrameCloud->clear(); std::lock_guard<std::mutex> lock(mtx); // find the closest history key frame std::vector<int> pointSearchIndLoop; std::vector<float> pointSearchSqDisLoop; // 所有关键帧位置——构建KD tree kdtreeHistoryKeyPoses->setInputCloud(cloudKeyPoses3D); // 查找符合范围内的所有节点 (要查找的节点中心,半径,输出,输出) kdtreeHistoryKeyPoses->radiusSearch(cloudKeyPoses3D->back(), historyKeyframeSearchRadius, pointSearchIndLoop, pointSearchSqDisLoop, 0); closestHistoryFrameID = -1; // 遍历查找到的节点 for (int i = 0; i < (int)pointSearchIndLoop.size(); ++i) { // 取对应的索引 int id = pointSearchIndLoop[i]; // 检查时间戳是否大于阈值 // 取最老的闭环 if (abs(cloudKeyPoses6D->points[id].time - timeLaserCloudInfoLast) > historyKeyframeSearchTimeDiff) { closestHistoryFrameID = id; break; } } if (closestHistoryFrameID == -1) return false; // 如果找到的刚好是本身 if ((int)cloudKeyPoses3D->size() - 1 == closestHistoryFrameID) return false; // save latest key frames // 储存最新帧的id latestFrameIDLoopCloure = cloudKeyPoses3D->size() - 1; // 将最新帧的特征点变换到世界坐标系,然后合并到latestKeyFrameCloud *latestKeyFrameCloud += *transformPointCloud(cornerCloudKeyFrames[latestFrameIDLoopCloure], &cloudKeyPoses6D->points[latestFrameIDLoopCloure]); *latestKeyFrameCloud += *transformPointCloud(surfCloudKeyFrames[latestFrameIDLoopCloure], &cloudKeyPoses6D->points[latestFrameIDLoopCloure]); // save history near key frames bool nearFrameAvailable = false; // 取闭环候选的前后各historyKeyframeSearchNum帧,合并成局部回环地图 for (int j = -historyKeyframeSearchNum; j <= historyKeyframeSearchNum; ++j) { // 如果是前后两头的情况,则跳过 if (closestHistoryFrameID + j < 0 || closestHistoryFrameID + j > latestFrameIDLoopCloure) continue; // 合并形成 闭环候选的局部地图 *nearHistoryKeyFrameCloud += *transformPointCloud(cornerCloudKeyFrames[closestHistoryFrameID+j], 
&cloudKeyPoses6D->points[closestHistoryFrameID+j]); *nearHistoryKeyFrameCloud += *transformPointCloud(surfCloudKeyFrames[closestHistoryFrameID+j], &cloudKeyPoses6D->points[closestHistoryFrameID+j]); nearFrameAvailable = true; } if (nearFrameAvailable == false) return false; *latestID = latestFrameIDLoopCloure; //最新关键帧的id *closestID = closestHistoryFrameID; //闭环候选关键帧id return true; } void performLoopClosure() { if (cloudKeyPoses3D->points.empty() == true) return; int latestFrameIDLoopCloure; int closestHistoryFrameID; // kt-tree找临近位姿节点 if (detectLoopClosure(&latestFrameIDLoopCloure, &closestHistoryFrameID) == false) return; // ICP Settings pcl::IterativeClosestPoint<PointType, PointType> icp; icp.setMaxCorrespondenceDistance(100); icp.setMaximumIterations(100); icp.setTransformationEpsilon(1e-6); icp.setEuclideanFitnessEpsilon(1e-6); icp.setRANSACIterations(0); // Downsample map cloud // 局部地图将采样 pcl::PointCloud<PointType>::Ptr cloud_temp(new pcl::PointCloud<PointType>()); downSizeFilterICP.setInputCloud(nearHistoryKeyFrameCloud); downSizeFilterICP.filter(*cloud_temp); *nearHistoryKeyFrameCloud = *cloud_temp; // publish history near key frames // 发布闭环候选局部地图 publishCloud(&pubHistoryKeyFrames, nearHistoryKeyFrameCloud, timeLaserInfoStamp, "odom"); // Align clouds // icp匹配 icp.setInputSource(latestKeyFrameCloud); icp.setInputTarget(nearHistoryKeyFrameCloud); pcl::PointCloud<PointType>::Ptr unused_result(new pcl::PointCloud<PointType>()); icp.align(*unused_result); // 如果icp不收敛,或者匹配(距离)>阈值,匹配失败 // std::cout << "ICP converg flag:" << icp.hasConverged() << ". 
Fitness score: " << icp.getFitnessScore() << std::endl; if (icp.hasConverged() == false || icp.getFitnessScore() > historyKeyframeFitnessScore) return; // publish corrected cloud // 否则,回环成功,发布 : 将最新关键帧的点云变换到世界地图上 if (pubIcpKeyFrames.getNumSubscribers() != 0){ pcl::PointCloud<PointType>::Ptr closed_cloud(new pcl::PointCloud<PointType>()); pcl::transformPointCloud(*latestKeyFrameCloud, *closed_cloud, icp.getFinalTransformation()); publishCloud(&pubIcpKeyFrames, closed_cloud, timeLaserInfoStamp, "odom"); } // Get pose transformation float x, y, z, roll, pitch, yaw; // 取回环得到: [wrong世界坐标系 到 闭环局部地图(true世界坐标系)的变换] (注意:不是得到Lidar在true世界坐标系的位姿) Eigen::Affine3f correctionLidarFrame; correctionLidarFrame = icp.getFinalTransformation(); // transform from world origin to wrong pose // 取旧的最新关键帧位姿(这里描述为wrong的位姿) Eigen::Affine3f tWrong = pclPointToAffine3f(cloudKeyPoses6D->points[latestFrameIDLoopCloure]); // transform from world origin to corrected pose // 计算 旧的关键帧Lidar坐标系到 回环得到的Lidar坐标系变换 // tWrong : 最新关键帧Lidar坐标系 到 wrong世界坐标系的位姿 // correctionLidarFrame : wrong世界坐标系 到 闭环局部地图(true世界坐标系)的变换 // tCorrect: 最新关键帧Lidar坐标系在世界坐标系的位姿 Eigen::Affine3f tCorrect = correctionLidarFrame * tWrong;// pre-multiplying -> successive rotation about a fixed frame // 分解 pcl::getTranslationAndEulerAngles (tCorrect, x, y, z, roll, pitch, yaw); // 构造回环因子 gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z)); gtsam::Pose3 poseTo = pclPointTogtsamPose3(cloudKeyPoses6D->points[closestHistoryFrameID]); // 噪声模型 (取icp匹配分数) gtsam::Vector Vector6(6); float noiseScore = icp.getFitnessScore(); Vector6 << noiseScore, noiseScore, noiseScore, noiseScore, noiseScore, noiseScore; noiseModel::Diagonal::shared_ptr constraintNoise = noiseModel::Diagonal::Variances(Vector6); // Add pose constraint // std::lock_guard<std::mutex> lock(mtx); // 添加到因子图() // poseFrom.inv(): 回环得到的Lidar坐标系到旧的关键帧Lidar坐标系的变换 // poseTo: 闭环关键帧对应的Lidar位姿 // poseFrom.between(poseTo): 闭环关键帧到最新关键帧的变换 
gtSAMgraph.add(BetweenFactor<Pose3>(latestFrameIDLoopCloure, closestHistoryFrameID, poseFrom.between(poseTo), constraintNoise)); isam->update(gtSAMgraph); isam->update(); isam->update(); isam->update(); isam->update(); isam->update(); gtSAMgraph.resize(0); // 取优化结果 isamCurrentEstimate = isam->calculateEstimate(); Pose3 latestEstimate = isamCurrentEstimate.at<Pose3>(isamCurrentEstimate.size()-1); // 更新当前帧位姿 transformTobeMapped[0] = latestEstimate.rotation().roll(); transformTobeMapped[1] = latestEstimate.rotation().pitch(); transformTobeMapped[2] = latestEstimate.rotation().yaw(); transformTobeMapped[3] = latestEstimate.translation().x(); transformTobeMapped[4] = latestEstimate.translation().y(); transformTobeMapped[5] = latestEstimate.translation().z(); // 清除path,需要更新全局位姿 correctPoses(); aLoopIsClosed = true; } /** * @brief updateInitialGuess * 更新初始化位姿估计 * 1. 初始化-第一帧:使用9轴IMU数据 * 2. 预积分数据有效,使用IMU预积分数据 * 3. 该帧点云对应的IMU数据有效,使用IMU数据 */ void updateInitialGuess() { static Eigen::Affine3f lastImuTransformation; // initialization // 如果是初始化,第一帧,则用9轴IMU if (cloudKeyPoses3D->points.empty()) { // 记录该帧点云对应的Lidar姿态(在imageProjection模块中根据9轴IMU的数据得到的) transformTobeMapped[0] = cloudInfo.imuRollInit; transformTobeMapped[1] = cloudInfo.imuPitchInit; transformTobeMapped[2] = cloudInfo.imuYawInit/*-M_PI/2*/; // 如果不使用IMU的朝向初始化 if (!useImuHeadingInitialization) transformTobeMapped[2] = 0; // 记录姿态 lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return; return; } // 如果不是初始化,那么就用IMU预积分的结果作为 初始位姿估计 // use imu pre-integration estimation for pose guess // (发生回环校正后,imuPreintegrationResetId会改变) if (cloudInfo.odomAvailable == true && cloudInfo.imuPreintegrationResetId == imuPreintegrationResetId) { transformTobeMapped[0] = cloudInfo.initialGuessRoll; transformTobeMapped[1] = cloudInfo.initialGuessPitch; transformTobeMapped[2] = cloudInfo.initialGuessYaw; transformTobeMapped[3] = cloudInfo.initialGuessX; 
        // (tail of updateInitialGuess) First frame with odometry guess:
        // seed translation from the pre-integrated IMU odometry guess.
        transformTobeMapped[4] = cloudInfo.initialGuessY;
        transformTobeMapped[5] = cloudInfo.initialGuessZ;
        lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return;
        return;
    }

    // If this cloud message carries usable IMU attitude, use the IMU rotation
    // increment (since the last frame) as the initial pose guess.
    // use imu incremental estimation for pose guess (only rotation)
    if (cloudInfo.imuAvailable == true)
    {
        Eigen::Affine3f transBack = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);
        // Rotation increment: last frame -> current frame.
        Eigen::Affine3f transIncre = lastImuTransformation.inverse() * transBack;

        // Compose the increment onto the previous estimate transformTobeMapped
        // to obtain the current-frame initial guess transFinal.
        Eigen::Affine3f transTobe = trans2Affine3f(transformTobeMapped);
        Eigen::Affine3f transFinal = transTobe * transIncre;
        // Decompose transFinal back into the transformTobeMapped array
        // (order: roll, pitch, yaw in [0..2]; x, y, z in [3..5]).
        pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],
                                          transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);

        lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return;
        return;
    }
}

// Collect the most recent key poses (up to surroundingKeyframeSize) and build
// the local map from them. Used when loop closure is enabled.
void extractForLoopClosure()
{
    pcl::PointCloud<PointType>::Ptr cloudToExtract(new pcl::PointCloud<PointType>());
    int numPoses = cloudKeyPoses3D->size();
    // Walk backwards from the newest key pose until the budget is filled.
    for (int i = numPoses-1; i >= 0; --i)
    {
        if ((int)cloudToExtract->size() <= surroundingKeyframeSize)
            cloudToExtract->push_back(cloudKeyPoses3D->points[i]);
        else
            break;
    }
    extractCloud(cloudToExtract);
}

// Collect key poses spatially near the latest key pose (plus the most recent
// ones in time, in case the robot only rotates in place) and build the local map.
void extractNearby()
{
    pcl::PointCloud<PointType>::Ptr surroundingKeyPoses(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr surroundingKeyPosesDS(new pcl::PointCloud<PointType>());
    std::vector<int> pointSearchInd;
    std::vector<float> pointSearchSqDis;

    // extract all the nearby key poses and downsample them
    // Build a KD-tree over all key-pose positions (x,y,z).
    kdtreeSurroundingKeyPoses->setInputCloud(cloudKeyPoses3D); // create kd-tree
    // Radius search around the newest key pose; indices and squared distances
    // are returned in pointSearchInd / pointSearchSqDis.
    kdtreeSurroundingKeyPoses->radiusSearch(cloudKeyPoses3D->back(), (double)surroundingKeyframeSearchRadius, pointSearchInd, pointSearchSqDis);
    // Copy the found neighbours into surroundingKeyPoses for filtering.
    for (int i = 0; i < (int)pointSearchInd.size(); ++i)
    {
        int id = pointSearchInd[i];
        surroundingKeyPoses->push_back(cloudKeyPoses3D->points[id]);
    }

    // Downsample the neighbour set.
    downSizeFilterSurroundingKeyPoses.setInputCloud(surroundingKeyPoses);
    downSizeFilterSurroundingKeyPoses.filter(*surroundingKeyPosesDS);

    // also extract some latest key frames in case the robot rotates in one position
    int numPoses = cloudKeyPoses3D->size();
    for (int i = numPoses-1; i >= 0; --i)
    {
        // Add key poses from the last 10 seconds before the current scan.
        if (timeLaserCloudInfoLast - cloudKeyPoses6D->points[i].time < 10.0)
            surroundingKeyPosesDS->push_back(cloudKeyPoses3D->points[i]);
        else
            break;
    }

    extractCloud(surroundingKeyPosesDS);
}

// Build the local corner/surface feature map in the world frame from the
// key frames referenced by cloudToExtract, then downsample it.
void extractCloud(pcl::PointCloud<PointType>::Ptr cloudToExtract)
{
    // One transformed cloud per selected key pose.
    std::vector<pcl::PointCloud<PointType>> laserCloudCornerSurroundingVec;
    std::vector<pcl::PointCloud<PointType>> laserCloudSurfSurroundingVec;
    laserCloudCornerSurroundingVec.resize(cloudToExtract->size());
    laserCloudSurfSurroundingVec.resize(cloudToExtract->size());

    // extract surrounding map
    #pragma omp parallel for num_threads(numberOfCores)
    for (int i = 0; i < (int)cloudToExtract->size(); ++i)
    {
        // Re-check the distance to the newest key pose.
        if (pointDistance(cloudToExtract->points[i], cloudKeyPoses3D->back()) > surroundingKeyframeSearchRadius)
            continue;
        // Key-frame index is stored in the point's intensity field.
        int thisKeyInd = (int)cloudToExtract->points[i].intensity;
        // Transform the stored corner features into the world frame.
        laserCloudCornerSurroundingVec[i] = *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
        // Transform the stored surface features into the world frame.
        laserCloudSurfSurroundingVec[i] = *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
    }

    // fuse the map
    // Clear the local maps, then merge the per-key-frame clouds into them.
    laserCloudCornerFromMap->clear();
    laserCloudSurfFromMap->clear();
    for (int i = 0; i < (int)cloudToExtract->size(); ++i)
    {
        *laserCloudCornerFromMap += laserCloudCornerSurroundingVec[i];
        *laserCloudSurfFromMap += laserCloudSurfSurroundingVec[i];
    }

    // Downsample the surrounding corner key frames (or map)
    downSizeFilterCorner.setInputCloud(laserCloudCornerFromMap);
    downSizeFilterCorner.filter(*laserCloudCornerFromMapDS);
    laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size();
    // Downsample the surrounding surf key frames (or map)
    downSizeFilterSurf.setInputCloud(laserCloudSurfFromMap);
    downSizeFilterSurf.filter(*laserCloudSurfFromMapDS);
    laserCloudSurfFromMapDSNum = laserCloudSurfFromMapDS->size();
}

/**
 * @brief extractSurroundingKeyFrames
 * Assemble the local feature map:
 * 1. pick key frames near the latest key frame,
 * 2. transform their corner/surface features into the world frame,
 * 3. merge them.
 */
void extractSurroundingKeyFrames()
{
    // Nothing to do before the first key frame exists.
    if (cloudKeyPoses3D->points.empty() == true)
        return;

    if (loopClosureEnableFlag == true)
    {
        // Loop-closure mode: take the most recent key frames.
        extractForLoopClosure();
    } else {
        // Normal mode: take spatially nearby key frames.
        extractNearby();
    }
}

/// Downsample the current scan's corner and surface feature clouds.
void downsampleCurrentScan()
{
    // Downsample cloud from current scan
    laserCloudCornerLastDS->clear();
    downSizeFilterCorner.setInputCloud(laserCloudCornerLast);
    downSizeFilterCorner.filter(*laserCloudCornerLastDS);
    laserCloudCornerLastDSNum = laserCloudCornerLastDS->size();

    laserCloudSurfLastDS->clear();
    downSizeFilterSurf.setInputCloud(laserCloudSurfLast);
    downSizeFilterSurf.filter(*laserCloudSurfLastDS);
    laserCloudSurfLastDSNum = laserCloudSurfLastDS->size();
}

// Cache transformTobeMapped (the current pose estimate) as an Eigen affine
// transform used by pointAssociateToMap.
void updatePointAssociateToMap()
{
    transPointAssociateToMap = trans2Affine3f(transformTobeMapped);
}

// For every downsampled corner point of the current scan: find the 5 nearest
// map corner points, fit a line via PCA, and if the point is close enough to
// the line, store the point and the distance gradient (coeff) for LM.
void cornerOptimization()
{
    updatePointAssociateToMap();

    #pragma omp parallel for num_threads(numberOfCores)
    for (int i = 0; i < laserCloudCornerLastDSNum; i++)
    {
        //pcl::PointXYZI
        PointType pointOri, pointSel, coeff;
        std::vector<int> pointSearchInd;
        std::vector<float> pointSearchSqDis;

        // Take one point from the downsampled corner set of the current scan.
        pointOri = laserCloudCornerLastDS->points[i];
        // Transform it from the lidar frame into the world frame.
        pointAssociateToMap(&pointOri, &pointSel);
        // Find the 5 nearest neighbours in the local corner map.
        kdtreeCornerFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);

        cv::Mat matA1(3, 3, CV_32F, cv::Scalar::all(0));
        cv::Mat matD1(1, 3, CV_32F, cv::Scalar::all(0));
        cv::Mat matV1(3, 3, CV_32F, cv::Scalar::all(0));

        // Only use the neighbourhood if even the 5th point is within 1 m.
        if (pointSearchSqDis[4] < 1.0) {
            float cx = 0, cy = 0, cz = 0;
            // Centroid of the 5 neighbours.
            for (int j = 0; j < 5; j++) {
                cx += laserCloudCornerFromMapDS->points[pointSearchInd[j]].x;
                cy += laserCloudCornerFromMapDS->points[pointSearchInd[j]].y;
                cz += laserCloudCornerFromMapDS->points[pointSearchInd[j]].z;
            }
            cx /= 5; cy /= 5; cz /= 5;

            // PCA on the covariance of the 5 points: if they lie on a line,
            // one eigenvalue dominates and its eigenvector is the line
            // direction; if they lie on a plane, the smallest eigenvalue's
            // eigenvector is the plane normal.
            float a11 = 0, a12 = 0, a13 = 0, a22 = 0, a23 = 0, a33 = 0;
            for (int j = 0; j < 5; j++) {
                float ax = laserCloudCornerFromMapDS->points[pointSearchInd[j]].x - cx;
                float ay = laserCloudCornerFromMapDS->points[pointSearchInd[j]].y - cy;
                float az = laserCloudCornerFromMapDS->points[pointSearchInd[j]].z - cz;

                a11 += ax * ax; a12 += ax * ay; a13 += ax * az;
                a22 += ay * ay; a23 += ay * az;
                a33 += az * az;
            }
            a11 /= 5; a12 /= 5; a13 /= 5; a22 /= 5; a23 /= 5; a33 /= 5;

            // Symmetric covariance matrix.
            matA1.at<float>(0, 0) = a11; matA1.at<float>(0, 1) = a12; matA1.at<float>(0, 2) = a13;
            matA1.at<float>(1, 0) = a12; matA1.at<float>(1, 1) = a22; matA1.at<float>(1, 2) = a23;
            matA1.at<float>(2, 0) = a13; matA1.at<float>(2, 1) = a23; matA1.at<float>(2, 2) = a33;

            // Eigen-decomposition.
            cv::eigen(matA1, matD1, matV1);

            // Accept as a line only if the largest eigenvalue is more than
            // 3x the second largest.
            if (matD1.at<float>(0, 0) > 3 * matD1.at<float>(0, 1)) {
                // p0: the query point.
                float x0 = pointSel.x;
                float y0 = pointSel.y;
                float z0 = pointSel.z;
                // Build two points p1, p2 on the fitted line: centroid +/- 0.1
                // along the dominant eigenvector.
                float x1 = cx + 0.1 * matV1.at<float>(0, 0);
                float y1 = cy + 0.1 * matV1.at<float>(0, 1);
                float z1 = cz + 0.1 * matV1.at<float>(0, 2);
                float x2 = cx - 0.1 * matV1.at<float>(0, 0);
                float y2 = cy - 0.1 * matV1.at<float>(0, 1);
                float z2 = cz - 0.1 * matV1.at<float>(0, 2);

                // |(p0-p1) x (p0-p2)|: twice the triangle area spanned by
                // p0, p1, p2.
                float a012 = sqrt(((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) * ((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
                                + ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1)) * ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))
                                + ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1)) * ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1)));

                // |p1-p2|: length of the line segment.
                float l12 = sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));

                // (la, lb, lc): partial derivatives of the point-to-line
                // distance ld2 with respect to (x0, y0, z0).
                float la = ((y1 - y2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
                          + (z1 - z2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))) / a012 / l12;

                float lb = -((x1 - x2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
                           - (z1 - z2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12;

                float lc = -((x1 - x2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))
                           + (y1 - y2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12;

                // Distance from p0 to the line.
                float ld2 = a012 / l12;

                // Weight: closer points get higher weight.
                float s = 1 - 0.9 * fabs(ld2);

                // Weighted distance gradient (Jacobian w.r.t. the point).
                coeff.x = s * la;
                coeff.y = s * lb;
                coeff.z = s * lc;
                coeff.intensity = s * ld2;

                // Keep the correspondence only if the residual is small enough.
                if (s > 0.1) {
                    laserCloudOriCornerVec[i] = pointOri;
                    coeffSelCornerVec[i] = coeff;
                    laserCloudOriCornerFlag[i] = true;
                }
            }
        }
    }
}

// For every downsampled surface point of the current scan: find the 5 nearest
// map surface points, fit a plane by least squares, and if the plane is valid
// and the point close enough, store the point and the distance gradient.
void surfOptimization()
{
    updatePointAssociateToMap();

    #pragma omp parallel for num_threads(numberOfCores)
    for (int i = 0; i < laserCloudSurfLastDSNum; i++)
    {
        //pcl::PointXYZI
        PointType pointOri, pointSel, coeff;
        std::vector<int> pointSearchInd;
        std::vector<float> pointSearchSqDis;

        // Take one point from the downsampled surface set of the current scan.
        pointOri = laserCloudSurfLastDS->points[i];
        // Transform it into the world frame.
        pointAssociateToMap(&pointOri, &pointSel);
        // Find the 5 nearest neighbours in the local surface map.
        kdtreeSurfFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);

        Eigen::Matrix<float, 5, 3> matA0;
        Eigen::Matrix<float, 5, 1> matB0;
        Eigen::Vector3f matX0;

        matA0.setZero();
        matB0.fill(-1);
        matX0.setZero();

        // Only use the neighbourhood if even the 5th point is within 1 m.
        if (pointSearchSqDis[4] < 1.0) {
            for (int j = 0; j < 5; j++) {
                matA0(j, 0) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].x;
                matA0(j, 1) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].y;
                matA0(j, 2) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].z;
            }
            //! matA0          matX0
            //! | x0 | y0 | z0 |   |X0_1|
            //! | x1 | y1 | z1 |   |X0_2|
            //! | x2 | y2 | z2 |   |X0_3|
            //! | x3 | y3 | z3 |
            //! | x4 | y4 | z4 |
            /// Solve A0 * n = -1 for the (unnormalised) plane normal matX0 of
            /// the plane through the 5 neighbours.
            matX0 = matA0.colPivHouseholderQr().solve(matB0);

            float pa = matX0(0, 0);
            float pb = matX0(1, 0);
            float pc = matX0(2, 0);
            float pd = 1;

            // Normalise the plane equation pa*x + pb*y + pc*z + pd = 0.
            float ps = sqrt(pa * pa + pb * pb + pc * pc);
            pa /= ps; pb /= ps; pc /= ps; pd /= ps;

            bool planeValid = true;
            // Reject the plane if any of the 5 points is more than 0.2 m away.
            for (int j = 0; j < 5; j++) {
                if (fabs(pa * laserCloudSurfFromMapDS->points[pointSearchInd[j]].x +
                         pb * laserCloudSurfFromMapDS->points[pointSearchInd[j]].y +
                         pc * laserCloudSurfFromMapDS->points[pointSearchInd[j]].z + pd) > 0.2) {
                    planeValid = false;
                    break;
                }
            }

            if (planeValid) {
                // Signed distance from the query point to the plane.
                float pd2 = pa * pointSel.x + pb * pointSel.y + pc * pointSel.z + pd;

                // Weight: penalise large residuals, normalised by range.
                float s = 1 - 0.9 * fabs(pd2) / sqrt(sqrt(pointSel.x * pointSel.x
                        + pointSel.y * pointSel.y + pointSel.z * pointSel.z));

                // (pa, pb, pc) doubles as the distance gradient w.r.t. the point.
                coeff.x = s * pa;
                coeff.y = s * pb;
                coeff.z = s * pc;
                coeff.intensity = s * pd2;

                if (s > 0.1) {
                    laserCloudOriSurfVec[i] = pointOri;
                    coeffSelSurfVec[i] = coeff;
                    laserCloudOriSurfFlag[i] = true;
                }
            }
        }
    }
}

// Gather the valid corner and surface correspondences (points + coefficients)
// into laserCloudOri / coeffSel for the LM solver.
void combineOptimizationCoeffs()
{
    // combine corner coeffs
    for (int i = 0; i < laserCloudCornerLastDSNum; ++i){
        if (laserCloudOriCornerFlag[i] == true){
            // Corner point (lidar frame) and its distance/gradient coefficients.
            laserCloudOri->push_back(laserCloudOriCornerVec[i]);
            coeffSel->push_back(coeffSelCornerVec[i]);
        }
    }
    // combine surf coeffs
    for (int i = 0; i < laserCloudSurfLastDSNum; ++i){
        if (laserCloudOriSurfFlag[i] == true){
            laserCloudOri->push_back(laserCloudOriSurfVec[i]);
            coeffSel->push_back(coeffSelSurfVec[i]);
        }
    }
    // reset flag for next iteration
    std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false);
    std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false);
}

// One Gauss-Newton/LM step for the lidar pose. Returns true when the update is
// small enough to declare convergence.
bool LMOptimization(int iterCount)
{
    // This optimization is from the original loam_velodyne by Ji Zhang,
    // need to cope with coordinate transformation:
    // lidar <- camera      ---     camera <- lidar
    // x = z                ---     x = y
    // y = x                ---     y = z
    // z = y                ---     z = x
    // roll = yaw           ---     roll = pitch
    // pitch = roll         ---     pitch = yaw
    // yaw = pitch          ---     yaw = roll

    // lidar -> camera
    // Precompute sines/cosines of the current rotation estimate.
    float srx = sin(transformTobeMapped[1]); // pitch  sin(rx)
    float crx = cos(transformTobeMapped[1]);
    float sry = sin(transformTobeMapped[2]); // yaw    sin(ry)
    float cry = cos(transformTobeMapped[2]);
    float srz = sin(transformTobeMapped[0]); // roll   sin(rz)
    float crz = cos(transformTobeMapped[0]);

    // Not enough correspondences: skip this iteration.
    int laserCloudSelNum = laserCloudOri->size();
    if (laserCloudSelNum < 50) {
        return false;
    }

    cv::Mat matA(laserCloudSelNum, 6, CV_32F, cv::Scalar::all(0));
    cv::Mat matAt(6, laserCloudSelNum, CV_32F, cv::Scalar::all(0));
    cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0));
    cv::Mat matB(laserCloudSelNum, 1, CV_32F, cv::Scalar::all(0));
    cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0));
    cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0));
    cv::Mat matP(6, 6, CV_32F, cv::Scalar::all(0));

    PointType pointOri, coeff;

    for (int i = 0; i < laserCloudSelNum; i++) {
        // lidar -> camera
        // Re-map the point's axes from the lidar frame to the camera frame.
        pointOri.x = laserCloudOri->points[i].y;
        pointOri.y = laserCloudOri->points[i].z;
        pointOri.z = laserCloudOri->points[i].x;
        // lidar -> camera
        // The coefficients are the distance gradient w.r.t. the point in the
        // lidar frame; re-map them to the camera frame too.
        coeff.x = coeffSel->points[i].y;
        coeff.y = coeffSel->points[i].z;
        coeff.z = coeffSel->points[i].x;
        coeff.intensity = coeffSel->points[i].intensity;
        // in camera
        /// NOTE(review): this is simply the feature distance differentiated
        /// w.r.t. the lidar pose; the axis shuffling only reuses LOAM's
        /// camera-frame derivation.
        /// rz: roll, rx: pitch, ry: yaw
        /// pointOri.z : point.x ; pointOri.x : point.y ; pointOri.y : point.z
        /// coeff.x : coeffSel->points[i].y
        /// coeff.y : coeffSel->points[i].z
        /// coeff.z : coeffSel->points[i].x
        // Partial derivatives of the residual w.r.t. pitch (arx), yaw (ary)
        // and roll (arz) in the camera convention.
        float arx = (crx*sry*srz*pointOri.x + crx*crz*sry*pointOri.y - srx*sry*pointOri.z) * coeff.x
                  + (-srx*srz*pointOri.x - crz*srx*pointOri.y - crx*pointOri.z) * coeff.y
                  + (crx*cry*srz*pointOri.x + crx*cry*crz*pointOri.y - cry*srx*pointOri.z) * coeff.z;

        float ary = ((cry*srx*srz - crz*sry)*pointOri.x + (sry*srz + cry*crz*srx)*pointOri.y + crx*cry*pointOri.z) * coeff.x
                  + ((-cry*crz - srx*sry*srz)*pointOri.x + (cry*srz - crz*srx*sry)*pointOri.y - crx*sry*pointOri.z) * coeff.z;

        float arz = ((crz*srx*sry - cry*srz)*pointOri.x + (-cry*crz-srx*sry*srz)*pointOri.y)*coeff.x
                  + (crx*crz*pointOri.x - crx*srz*pointOri.y) * coeff.y
                  + ((sry*srz + cry*crz*srx)*pointOri.x + (crz*sry-cry*srx*srz)*pointOri.y)*coeff.z;
        // lidar -> camera
        // Row of the Jacobian: d(residual)/d(roll,pitch,yaw,tx,ty,tz).
        matA.at<float>(i, 0) = arz;     // d(dist)/d(roll)
        matA.at<float>(i, 1) = arx;     // d(dist)/d(pitch)
        matA.at<float>(i, 2) = ary;     // d(dist)/d(yaw)
        matA.at<float>(i, 3) = coeff.z; // d(dist)/d(tx)
        matA.at<float>(i, 4) = coeff.x; // d(dist)/d(ty)
        matA.at<float>(i, 5) = coeff.y; // d(dist)/d(tz)
        matB.at<float>(i, 0) = -coeff.intensity;
    }

    // Normal equations: (J^T J) x = -J^T b, solved with QR.
    cv::transpose(matA, matAt);
    matAtA = matAt * matA;
    matAtB = matAt * matB;
    cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR);

    // On the first iteration, detect degeneracy of the problem.
    if (iterCount == 0) {

        cv::Mat matE(1, 6, CV_32F, cv::Scalar::all(0));
        cv::Mat matV(6, 6, CV_32F, cv::Scalar::all(0));
        cv::Mat matV2(6, 6, CV_32F, cv::Scalar::all(0));

        cv::eigen(matAtA, matE, matV);
        matV.copyTo(matV2);

        isDegenerate = false;
        // Directions with eigenvalue < 100 are considered degenerate; zero
        // out the corresponding eigenvectors so updates along them are
        // projected away.
        float eignThre[6] = {100, 100, 100, 100, 100, 100};
        for (int i = 5; i >= 0; i--) {
            if (matE.at<float>(0, i) < eignThre[i]) {
                for (int j = 0; j < 6; j++) {
                    matV2.at<float>(i, j) = 0;
                }
                isDegenerate = true;
            } else {
                break;
            }
        }
        matP = matV.inv() * matV2;
    }

    if (isDegenerate)
    {
        // Project the solution onto the non-degenerate subspace.
        cv::Mat matX2(6, 1, CV_32F, cv::Scalar::all(0));
        matX.copyTo(matX2);
        matX = matP * matX2;
    }

    // Apply the increment to the pose being optimised.
    transformTobeMapped[0] += matX.at<float>(0, 0); //roll
    transformTobeMapped[1] += matX.at<float>(1, 0); //pitch
    transformTobeMapped[2] += matX.at<float>(2, 0); //yaw
    transformTobeMapped[3] += matX.at<float>(3, 0); //x
    transformTobeMapped[4] += matX.at<float>(4, 0); //y
    transformTobeMapped[5] += matX.at<float>(5, 0); //z

    // Convergence check: rotation in degrees, translation in centimetres.
    float deltaR = sqrt(
                        pow(pcl::rad2deg(matX.at<float>(0, 0)), 2) +
                        pow(pcl::rad2deg(matX.at<float>(1, 0)), 2) +
                        pow(pcl::rad2deg(matX.at<float>(2, 0)), 2));
    float deltaT = sqrt(
                        pow(matX.at<float>(3, 0) * 100, 2) +
                        pow(matX.at<float>(4, 0) * 100, 2) +
                        pow(matX.at<float>(5, 0) * 100, 2));

    if (deltaR < 0.05 && deltaT < 0.05) {
        return true; // converged
    }
    return false; // keep optimizing
}

// Scan-to-map registration: iteratively match the current scan's features
// against the local map and refine transformTobeMapped.
void scan2MapOptimization()
{
    //if (cloudKeyPoses3D->points.empty())
    //    return;

    // Require a minimum number of corner and surface features.
    if (laserCloudCornerLastDSNum > edgeFeatureMinValidNum && laserCloudSurfLastDSNum > surfFeatureMinValidNum)
    {
        // Build KD-trees over the local feature maps.
        kdtreeCornerFromMap->setInputCloud(laserCloudCornerFromMapDS);
        kdtreeSurfFromMap->setInputCloud(laserCloudSurfFromMapDS);

        // Up to 30 LM iterations.
        for (int iterCount = 0; iterCount < 30; iterCount++)
        {
            laserCloudOri->clear();
            coeffSel->clear();

            // Match current corner features to map lines; compute residuals
            // and gradients.
            cornerOptimization();
            // Match current surface features to map planes; compute residuals
            // and gradients.
            surfOptimization();

            // Gather all correspondences into laserCloudOri / coeffSel.
            combineOptimizationCoeffs();

            // One solver step; stop when converged.
            if (LMOptimization(iterCount) == true)
                break;
        }

        transformUpdate();
    } else {
        ROS_WARN("Not enough features! Only %d edge and %d planar features available.", laserCloudCornerLastDSNum, laserCloudSurfLastDSNum);
    }
}

// Fuse the optimised roll/pitch with the IMU attitude (weighted slerp) and
// clamp roll, pitch and z to their configured limits.
void transformUpdate()
{
    if (cloudInfo.imuAvailable == true)
    {
        // Only trust the IMU attitude when pitch is well away from +/- pi/2.
        if (std::abs(cloudInfo.imuPitchInit) < 1.4)
        {
            double imuWeight = 0.05;
            tf::Quaternion imuQuaternion;
            tf::Quaternion transformQuaternion;
            double rollMid, pitchMid, yawMid;

            // slerp roll
            // Optimised roll vs. the IMU roll at scan start; with
            // imuWeight = 0.05 the result is ~0.95*LM + 0.05*IMU.
            transformQuaternion.setRPY(transformTobeMapped[0], 0, 0);
            imuQuaternion.setRPY(cloudInfo.imuRollInit, 0, 0);
            tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
            transformTobeMapped[0] = rollMid;

            // slerp pitch
            transformQuaternion.setRPY(0, transformTobeMapped[1], 0);
            imuQuaternion.setRPY(0, cloudInfo.imuPitchInit, 0);
            tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
            transformTobeMapped[1] = pitchMid;
        }
    }

    // Clamp roll/pitch and z to their tolerances.
    transformTobeMapped[0] = constraintTransformation(transformTobeMapped[0], rotation_tollerance);
    transformTobeMapped[1] = constraintTransformation(transformTobeMapped[1], rotation_tollerance);
    transformTobeMapped[5] = constraintTransformation(transformTobeMapped[5], z_tollerance);
}

// Clamp value to the symmetric interval [-limit, +limit].
float constraintTransformation(float value, float limit)
{
    if (value < -limit)
        value = -limit;
    if (value > limit)
        value = limit;

    return value;
}

// Decide whether the current frame should become a new key frame: yes if it
// moved/rotated enough relative to the previous key frame (or it is the first).
bool saveFrame()
{
    if (cloudKeyPoses3D->points.empty())
        return true;

    // Pose of the previous key frame.
    Eigen::Affine3f transStart = pclPointToAffine3f(cloudKeyPoses6D->back());
    // Current LM-optimised pose.
    Eigen::Affine3f transFinal = pcl::getTransformation(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],
                                                        transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);
    // Relative transform: previous key frame -> current frame.
    Eigen::Affine3f transBetween = transStart.inverse() * transFinal;
    float x, y, z, roll, pitch, yaw;
    pcl::getTranslationAndEulerAngles(transBetween, x, y, z, roll, pitch, yaw);

    // Below all thresholds: not enough motion for a new key frame.
    if (abs(roll)  < surroundingkeyframeAddingAngleThreshold &&
        abs(pitch) < surroundingkeyframeAddingAngleThreshold &&
        abs(yaw)   < surroundingkeyframeAddingAngleThreshold &&
        sqrt(x*x + y*y + z*z) < surroundingkeyframeAddingDistThreshold)
        return false;

    return true;
}

// Add the lidar-odometry factor (prior for the first key frame, between-factor
// otherwise) to the GTSAM graph.
void addOdomFactor()
{
    if (cloudKeyPoses3D->points.empty())
    {
        // First key frame: prior factor with a loose yaw/translation prior.
        // Noise order: (roll, pitch, yaw, x, y, z).
        noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-2, 1e-2, M_PI*M_PI, 1e8, 1e8, 1e8).finished()); // rad*rad, meter*meter
        gtSAMgraph.add(PriorFactor<Pose3>(0, trans2gtsamPose(transformTobeMapped), priorNoise));
        // Initial value for the new variable.
        initialEstimate.insert(0, trans2gtsamPose(transformTobeMapped));
    }else{
        // Subsequent key frames: between-factor from the previous key pose.
        noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
        // Previous key frame's optimised pose.
        gtsam::Pose3 poseFrom = pclPointTogtsamPose3(cloudKeyPoses6D->points.back());
        // Current LM-optimised lidar-odometry pose.
        gtsam::Pose3 poseTo   = trans2gtsamPose(transformTobeMapped);
        // poseFrom.between(poseTo): relative transform between the two poses.
        gtSAMgraph.add(BetweenFactor<Pose3>(cloudKeyPoses3D->size()-1, cloudKeyPoses3D->size(), poseFrom.between(poseTo), odometryNoise));
        initialEstimate.insert(cloudKeyPoses3D->size(), poseTo);
    }
}

// Add a GPS factor to the graph when GPS data is time-synchronised with the
// scan, sufficiently accurate, and the pose covariance is large enough to
// benefit from it.
void addGPSFactor()
{
    if (gpsQueue.empty())
        return;

    // wait for system initialized and settles down
    if (cloudKeyPoses3D->points.empty())
        return;
    else
    {
        // Require at least 5 m between the first and the latest key pose.
        if (pointDistance(cloudKeyPoses3D->front(), cloudKeyPoses3D->back()) < 5.0)
            return;
    }

    // pose covariance small, no need to correct
    if (poseCovariance(3,3) < poseCovThreshold && poseCovariance(4,4) < poseCovThreshold)
        return;

    // last gps position
    static PointType lastGPSPoint;

    while (!gpsQueue.empty())
    {
        // GPS more than 0.2 s older than the scan: stale, drop it.
        if (gpsQueue.front().header.stamp.toSec() < timeLaserCloudInfoLast - 0.2)
        {
            // message too old
            gpsQueue.pop_front();
        }
        else if (gpsQueue.front().header.stamp.toSec() > timeLaserCloudInfoLast + 0.2)
        {
            // message too new
            break;
        }
        else
        {
            // GPS and scan timestamps are synchronised.
            nav_msgs::Odometry thisGPS = gpsQueue.front();
            gpsQueue.pop_front();

            // GPS too noisy, skip
            float noise_x = thisGPS.pose.covariance[0];
            float noise_y = thisGPS.pose.covariance[7];
            float noise_z = thisGPS.pose.covariance[14];
            if (noise_x > gpsCovThreshold || noise_y > gpsCovThreshold)
                continue;

            float gps_x = thisGPS.pose.pose.position.x;
            float gps_y = thisGPS.pose.pose.position.y;
            float gps_z = thisGPS.pose.pose.position.z;
            if (!useGpsElevation)
            {
                // GPS elevation disabled: fall back to the lidar-odometry z.
                gps_z = transformTobeMapped[5];
                noise_z = 0.01;
            }

            // GPS not properly initialized (0,0,0)
            if (abs(gps_x) < 1e-6 && abs(gps_y) < 1e-6)
                continue;

            // Add GPS every a few meters
            // Require at least 5 m from the last added GPS factor.
            PointType curGPSPoint;
            curGPSPoint.x = gps_x;
            curGPSPoint.y = gps_y;
            curGPSPoint.z = gps_z;
            if (pointDistance(curGPSPoint, lastGPSPoint) < 5.0)
                continue;
            else
                lastGPSPoint = curGPSPoint;

            // Noise model (floored at 1.0) and the GPS factor itself.
            gtsam::Vector Vector3(3);
            Vector3 << max(noise_x, 1.0f), max(noise_y, 1.0f), max(noise_z, 1.0f);
            noiseModel::Diagonal::shared_ptr gps_noise = noiseModel::Diagonal::Variances(Vector3);
            gtsam::GPSFactor gps_factor(cloudKeyPoses3D->size(), gtsam::Point3(gps_x, gps_y, gps_z), gps_noise);
            gtSAMgraph.add(gps_factor);

            aLoopIsClosed = true;

            break;
        }
    }
}

// If the current frame qualifies as a key frame: add odometry/GPS factors,
// run iSAM2, and store the optimised pose and feature clouds.
void saveKeyFramesAndFactor()
{
    // Not enough motion: no new key frame.
    if (saveFrame() == false)
        return;

    // odom factor
    // Lidar-odometry factor (from the scan-to-map LM optimisation).
    addOdomFactor();

    // gps factor
    addGPSFactor();

    // cout << "****************************************************" << endl;
    // gtSAMgraph.print("GTSAM Graph:\n");

    // update iSAM
    isam->update(gtSAMgraph, initialEstimate);
    isam->update();

    // Extra update passes to converge after a loop closure / GPS correction.
    if (aLoopIsClosed == true)
    {
        isam->update();
        isam->update();
        isam->update();
        isam->update();
        isam->update();
    }

    gtSAMgraph.resize(0);
    initialEstimate.clear();

    // save key poses
    PointType thisPose3D;
    PointTypePose thisPose6D;
    Pose3 latestEstimate;

    isamCurrentEstimate = isam->calculateEstimate();
    latestEstimate = isamCurrentEstimate.at<Pose3>(isamCurrentEstimate.size()-1);
    // cout << "****************************************************" << endl;
    // isamCurrentEstimate.print("Current estimate: ");

    // Record the optimised pose of the current frame.
    thisPose3D.x = latestEstimate.translation().x();
    thisPose3D.y = latestEstimate.translation().y();
    thisPose3D.z = latestEstimate.translation().z();
    thisPose3D.intensity = cloudKeyPoses3D->size(); // this can be used as index
    cloudKeyPoses3D->push_back(thisPose3D);

    thisPose6D.x = thisPose3D.x;
    thisPose6D.y = thisPose3D.y;
    thisPose6D.z = thisPose3D.z;
    thisPose6D.intensity = thisPose3D.intensity ; // this can be used as index
    thisPose6D.roll  = latestEstimate.rotation().roll();
    thisPose6D.pitch = latestEstimate.rotation().pitch();
    thisPose6D.yaw   = latestEstimate.rotation().yaw();
    thisPose6D.time = timeLaserCloudInfoLast;
    cloudKeyPoses6D->push_back(thisPose6D);

    // cout << "****************************************************" << endl;
    // cout << "Pose covariance:" << endl;
    // cout << isam->marginalCovariance(isamCurrentEstimate.size()-1) << endl << endl;
    // Marginal covariance of the newest pose (used by addGPSFactor).
    poseCovariance = isam->marginalCovariance(isamCurrentEstimate.size()-1);

    // save updated transform
    transformTobeMapped[0] = latestEstimate.rotation().roll();
    transformTobeMapped[1] = latestEstimate.rotation().pitch();
    transformTobeMapped[2] = latestEstimate.rotation().yaw();
    transformTobeMapped[3] = latestEstimate.translation().x();
    transformTobeMapped[4] = latestEstimate.translation().y();
    transformTobeMapped[5] = latestEstimate.translation().z();

    // save all the received edge and surf points
    pcl::PointCloud<PointType>::Ptr thisCornerKeyFrame(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr thisSurfKeyFrame(new pcl::PointCloud<PointType>());
    pcl::copyPointCloud(*laserCloudCornerLastDS,  *thisCornerKeyFrame);
    pcl::copyPointCloud(*laserCloudSurfLastDS,    *thisSurfKeyFrame);

    // save key frame cloud
    // Store the current frame's corner/surface features (lidar frame).
    cornerCloudKeyFrames.push_back(thisCornerKeyFrame);
    surfCloudKeyFrames.push_back(thisSurfKeyFrame);

    // save path for visualization
    updatePath(thisPose6D);

    // NOTE: only the newest key pose is updated here; the factor-graph
    // optimisation may have moved older nodes, which are only refreshed in
    // correctPoses() after a loop closure.
}

// After a loop closure: refresh all key poses and the visualisation path from
// the new iSAM2 estimate.
void correctPoses()
{
    if (cloudKeyPoses3D->points.empty())
        return;

    if (aLoopIsClosed == true)
    {
        // clear path
        // Clear the path; all global poses must be rebuilt.
        globalPath.poses.clear();

        // update key poses
        int numPoses = isamCurrentEstimate.size();
        for (int i = 0; i < numPoses; ++i)
        {
            cloudKeyPoses3D->points[i].x = isamCurrentEstimate.at<Pose3>(i).translation().x();
            cloudKeyPoses3D->points[i].y = isamCurrentEstimate.at<Pose3>(i).translation().y();
            cloudKeyPoses3D->points[i].z = isamCurrentEstimate.at<Pose3>(i).translation().z();

            cloudKeyPoses6D->points[i].x = cloudKeyPoses3D->points[i].x;
            cloudKeyPoses6D->points[i].y = cloudKeyPoses3D->points[i].y;
            cloudKeyPoses6D->points[i].z = cloudKeyPoses3D->points[i].z;
            cloudKeyPoses6D->points[i].roll  = isamCurrentEstimate.at<Pose3>(i).rotation().roll();
            cloudKeyPoses6D->points[i].pitch = isamCurrentEstimate.at<Pose3>(i).rotation().pitch();
            cloudKeyPoses6D->points[i].yaw   = isamCurrentEstimate.at<Pose3>(i).rotation().yaw();

            updatePath(cloudKeyPoses6D->points[i]);
        }

        aLoopIsClosed = false;
        // ID for reseting IMU pre-integration
        ++imuPreintegrationResetId;
    }
}

// Append a pose to the global visualisation path.
void updatePath(const PointTypePose& pose_in)
{
    geometry_msgs::PoseStamped pose_stamped;
    pose_stamped.header.stamp = timeLaserInfoStamp;
    pose_stamped.header.frame_id = "odom";
    pose_stamped.pose.position.x = pose_in.x;
    pose_stamped.pose.position.y = pose_in.y;
    pose_stamped.pose.position.z = pose_in.z;
    tf::Quaternion q = tf::createQuaternionFromRPY(pose_in.roll, pose_in.pitch, pose_in.yaw);
    pose_stamped.pose.orientation.x = q.x();
    pose_stamped.pose.orientation.y = q.y();
    pose_stamped.pose.orientation.z = q.z();
    pose_stamped.pose.orientation.w = q.w();

    globalPath.poses.push_back(pose_stamped);
}

// Publish the latest pose (optimised and loop-corrected) as an odometry message.
void publishOdometry()
{
    // Publish odometry for ROS
    nav_msgs::Odometry laserOdometryROS;
    laserOdometryROS.header.stamp = timeLaserInfoStamp;
    laserOdometryROS.header.frame_id = "odom";
    laserOdometryROS.child_frame_id = "odom_mapping";
    laserOdometryROS.pose.pose.position.x = transformTobeMapped[3];
    laserOdometryROS.pose.pose.position.y = transformTobeMapped[4];
    laserOdometryROS.pose.pose.position.z = transformTobeMapped[5];
    laserOdometryROS.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);
    // covariance[0] carries the IMU pre-integration reset counter downstream.
    laserOdometryROS.pose.covariance[0] = double(imuPreintegrationResetId);
    pubOdomAftMappedROS.publish(laserOdometryROS);
}

// Publish the local map, the registered current key frame, and (optionally)
// the registered raw cloud.
void publishFrames()
{
    // if (cloudKeyPoses3D->points.empty())
    //     return;
    // publish key poses
    //publishCloud(&pubKeyPoses, cloudKeyPoses3D, timeLaserInfoStamp, "odom");
    // Publish surrounding key frames
    // World-frame local surface feature map.
    publishCloud(&pubRecentKeyFrames, laserCloudSurfFromMapDS, timeLaserInfoStamp, "odom");
    // publish registered key frame
    if (pubRecentKeyFrame.getNumSubscribers() != 0)
    {
        // Merge current corner + surface features, transform to the world
        // frame using the latest optimised pose, and publish.
        pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());
        PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped);
        *cloudOut += *transformPointCloud(laserCloudCornerLastDS,  &thisPose6D);
        *cloudOut += *transformPointCloud(laserCloudSurfLastDS,    &thisPose6D);
        publishCloud(&pubRecentKeyFrame, cloudOut, timeLaserInfoStamp, "odom");
    }
    // publish registered high-res raw cloud
    if (pubCloudRegisteredRaw.getNumSubscribers() != 0)
    {
        // Deskewed raw cloud, transformed to the world frame with the latest
        // optimised pose.
        pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());
        pcl::fromROSMsg(cloudInfo.cloud_deskewed, *cloudOut);
        PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped);
        *cloudOut = *transformPointCloud(cloudOut,  &thisPose6D);
        publishCloud(&pubCloudRegisteredRaw, cloudOut, timeLaserInfoStamp, "odom");
    }
    // // publish path
    // if (pubPath.getNumSubscribers() != 0)
    // {
    //     globalPath.header.stamp = timeLaserInfoStamp;
    //     globalPath.header.frame_id = "odom";
    //     pubPath.publish(globalPath);
    // }
}
};

#endif  // MAPOPTMIZATION_H
/* ===================== file: 5-64t.c ===================== */
#include <stdio.h>
#include <omp.h>

/* COUNT was referenced but never defined, so the file only compiled with an
 * external -DCOUNT=... flag. Provide a guarded default so it builds alone. */
#ifndef COUNT
#define COUNT 64
#endif

/*
 * Sums the integers [0, COUNT) across 64 OpenMP threads and prints each
 * thread's running partial sum, then the final total.
 *
 * Fix: the original accumulated into the shared 'sum' from many threads with
 * no synchronization — a data race (undefined behavior, wrong totals). The
 * reduction(+:sum) clause gives each thread a private accumulator and
 * combines them correctly at the end of the loop.
 */
int main()
{
    int i;
    int sum = 0;

    omp_set_num_threads(64);

    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < COUNT; i++)
    {
        sum = sum + i;
        /* 'sum' here is the thread-private partial sum. */
        printf("Thread number: %d Iteration: %d Local Sum: %d \n",
               omp_get_thread_num(), i, sum);
    }

    printf("\n All Threads Done – Final Global Sum: %d \n\n", sum);
    return 0;
}
/* ===================== file: test3.c ===================== */
/*
 * Test fixture for a static-analysis / barrier-matching pass: the bare
 * integer expression statements (0; 1; 2; ...) are program-point markers,
 * not computations, and the constant conditions label the branch structure.
 * 'x' is written only on one path before a barrier and read after the
 * parallel region — the shape this test exists to exercise.
 * Do not "clean up" the no-op statements; they are the test's labels.
 */
int main() {
  int x;                      /* assigned only on the if(1)/if(3) path */
  #pragma omp parallel
  {
    0;                        /* marker: parallel region entry */
    if (1) {
      2;
      if (3) {
        x = 0;
        4;
        #pragma omp barrier   /* barrier on the taken branch */
        5;
      } else {
        6;
        #pragma omp barrier
        // x;
        7;
      }
      8;
    } else {
      9;
      if (10) {
        11;
        #pragma omp barrier
        12;
      } else {
        13;
        #pragma omp barrier
        14;
      }
      15;
    }
    16;                       /* marker: parallel region exit */
  }
  x;                          /* read of x after the region */
  17;
}
/* ===================== file: main.c ===================== */
/*
 * bar: for each i in [0, N), set A[i] = i + (T[0] + ... + T[M-1]).
 * The outer loop is distributed over OpenMP threads; iterations are
 * independent because A and T do not alias (restrict).
 */
void bar(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (int i = 0; i < N; ++i) {
      int acc = i;
      for (int j = 0; j < M; ++j)
        acc += T[j];
      A[i] = acc;
    }
  }
}

/*
 * foo: builds a fixed table T = {0, 1, 2, 3} and fills A[0..N-1] through
 * bar() inside an SPF region (A[i] = i + 6).
 */
void foo(int N, int *A) {
  int TSize = 4;
  int T[4];
  for (int i = 0; i < TSize; ++i)
    T[i] = i;
#pragma spf region
  {
    bar(TSize, T, N, A);
  }
}
/* ===================== file: GB_unaryop__abs_int32_uint64.c ===================== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_uint64
// op(A') function:  GB_tran__abs_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = |(int32_t) Ax [p]| for the first anz entries,
// split statically over nthreads OpenMP threads.
GrB_Info GB_unop__abs_int32_uint64
(
    int32_t *restrict Cx,        // output array, anz entries
    const uint64_t *restrict Ax, // input array, anz entries
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unaryop_transpose.c
// with the macros above defining types, cast and operator.
GrB_Info GB_tran__abs_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===================== file: simulation.h ===================== */
#ifndef SIMULATION_H
#define SIMULATION_H

#include <filesystem>
#include <fstream>
#include <vector>

#include "selection_type.h"
#include "env_change_type.h"
#include "adaptation_period.h"
#include "environment.h"
#include "population.h"
//#include <omp.h>

double identity_first_element(const std::vector<double>& vector);

///Run-level parameters of a simulation: RNG seed, environmental change
///frequencies for the two environmental functions, selection regime and
///number of generations.  Serializable via nlohmann-json.
struct sim_param
{
    NLOHMANN_DEFINE_TYPE_INTRUSIVE(sim_param,
                                   seed,
                                   change_freq_A,
                                   change_freq_B,
                                   selection_strength,
                                   selection_duration,
                                   n_generations,
                                   selection_freq,
                                   change_sym_type,
                                   change_freq_type,
                                   sel_type,
                                   adaptation_per)

    sim_param(int seed_n = 0,
              double change_frequency_A = 0.1,
              double change_frequency_B = 0.01,
              double sel_strength = 1,
              int generations = 100,
              int selection_frequency = 1,
              env_change_symmetry_type env_change_symmetry_type = env_change_symmetry_type::symmetrical,
              env_change_freq_type env_change_freq_type = env_change_freq_type::stochastic,
              selection_type selec_type = selection_type::constant,
              adaptation_period adapt_per = adaptation_period::off):
        seed{seed_n},
        change_freq_A{change_frequency_A},
        change_freq_B{change_frequency_B},
        selection_strength{sel_strength},
        n_generations{generations},
        selection_freq{selection_frequency},
        // selection_duration is derived: one tenth of selection_freq
        // (integer division), 0 when selection never happens
        selection_duration{selection_freq == 0 ? 0 : selection_freq / 10},
        change_sym_type{env_change_symmetry_type},
        change_freq_type{env_change_freq_type},
        sel_type{selec_type},
        adaptation_per{adapt_per}
    {}

    int seed;
    double change_freq_A;
    double change_freq_B;
    double selection_strength;
    int n_generations;
    int selection_freq;
    int selection_duration;
    env_change_symmetry_type change_sym_type;
    env_change_freq_type change_freq_type;
    selection_type sel_type;
    adaptation_period adaptation_per;
};

bool operator==(const sim_param& lhs, const sim_param& rhs);

///Aggregate of every parameter struct needed to build a simulation.
struct all_params
{
    NLOHMANN_DEFINE_TYPE_INTRUSIVE(all_params,
                                   e_p,
                                   i_p,
                                   p_p,
                                   s_p)
    env_param e_p;
    ind_param i_p;
    pop_param p_p;
    sim_param s_p;
};

///Assigns the given new input to each individual in the simulation
template<class Sim>
void assign_new_inputs_to_inds(Sim &s, std::vector<double> new_input)
{
    pop::assign_new_inputs_to_inds(s.get_pop(), new_input);
}

///Assigns the input in simulation<M> to individuals
template<class Sim>
void assign_inputs(Sim &s)
{
    pop::assign_new_inputs_to_inds(s.get_pop(), s.create_inputs());
}

///Returns the individuals in the simulation
template<class Sim>
const std::vector<typename Sim::pop_t::ind_t> &get_inds(const Sim&s)
{
    return s.get_pop().get_inds();
}

///Returns the input of the individuals
///(assumes all individuals share the same input; only the first is read)
template<class Sim>
std::vector<double> get_inds_input(const Sim &s)
{
    //assert(all_individuals_have_same_input(s));
    return get_inds(s)[0].get_input_values();
}

///Returns the size of the inputs of the individuals
template<class Sim>
size_t get_inds_input_size(const Sim &s)
{
    return get_inds_input(s).size();
}

///Updates the inputs in simulation and assigns them to individuals
template<class Sim>
void assign_new_inputs(Sim &s)
{
    auto new_inputs = s.create_inputs();

    // keep the last input element (environment indicator) unchanged
    if(s.get_input().size() > 1){
        new_inputs.back() = s.get_input().back();
    }

    s.update_inputs(new_inputs);
    assign_inputs(s);
}

///A full evolutionary simulation: a population evolving in a switching
///environment.  The environmental-change regime, selection regime and
///adaptation period are compile-time template parameters.
template<class Pop = population<>,
         enum env_change_symmetry_type Env_change_sym = env_change_symmetry_type::symmetrical,
         enum env_change_freq_type Env_change_freq = env_change_freq_type::stochastic,
         enum selection_type Sel_Type = selection_type::constant,
         enum adaptation_period Adapt_per = adaptation_period::off>
class simulation
{
public:

    using pop_t = Pop;
    using env_ch_s_t = env_change_symmetry_type;
    using env_ch_f_t = env_change_freq_type;
    using sel_t = selection_type;
    using adapt_p = adaptation_period;
    static constexpr response_type Resp_type = pop_t::ind_t::net_t::response_t;

    simulation(int init_pop_size = 1,
               int seed = 0,
               double t_change_interval = 0.1,
               std::vector<int> net_arch = {1,2,1},
               double sel_str = 2,
               int number_of_generations = 1000):
        m_environment{},
        m_population{init_pop_size},
        m_n_generations{number_of_generations},
        m_seed{seed},
        m_t_change_env_distr_A{static_cast<double>(t_change_interval)},
        m_t_change_env_distr_B{static_cast<double>(t_change_interval)},
        m_sel_str{sel_str},
        m_change_freq_A{static_cast<double>(t_change_interval)},
        m_change_freq_B{static_cast<double>(t_change_interval)},
        m_input(net_arch[0], 1),
        m_optimal_output{1}
    {
        m_rng.seed(m_seed);
        for(auto& ind : m_population.get_inds_nonconst())
        {
            ind = individual{net_param{net_arch, linear, net_arch}};
        }
    }

    simulation(const all_params& params):
        m_environment{params.e_p},
        m_population{params.p_p, params.i_p},
        m_n_generations{params.s_p.n_generations},
        m_seed{params.s_p.seed},
        m_t_change_env_distr_A{static_cast<double>(params.s_p.change_freq_A)},
        m_t_change_env_distr_B{static_cast<double>(params.s_p.change_freq_B)},
        m_sel_str{params.s_p.selection_strength},
        m_change_freq_A {static_cast<double>(params.s_p.change_freq_A)},
        m_change_freq_B {static_cast<double>(params.s_p.change_freq_B)},
        m_selection_frequency{params.s_p.selection_freq},
        m_selection_duration{params.s_p.selection_duration},
        m_params {params},
        m_input(params.i_p.net_par.net_arc[0], 1), //BAD!!! implementation of env function input
        m_optimal_output{1}
    {
        m_rng.seed(m_seed);
    }

    NLOHMANN_DEFINE_TYPE_INTRUSIVE(simulation,
                                   m_environment,
                                   m_population,
                                   m_time,
                                   m_change_freq_A,
                                   m_change_freq_B,
                                   m_sel_str,
                                   m_seed)

    ///Returns const ref to population member
    const Pop& get_pop() const noexcept {return m_population;}

    ///Returns non-const ref to population member
    Pop& get_pop() noexcept {return m_population;}

    ///Returns ref to rng
    std::mt19937_64& get_rng() noexcept {return m_rng;}

    ///Returns ref to environmental rng
    std::mt19937_64 &get_env_rng() noexcept {return m_environment.get_rng();}

    ///Returns const ref to env member
    const environment& get_env() const noexcept {return m_environment;}

    ///Returns non-const ref to env member
    environment& get_env() noexcept {return m_environment;}

    ///Returns the range of the inputs provided by environment
    const range& get_env_cue_range() const noexcept {return m_environment.get_cue_range();}

    ///Returns the mutation step of the population parameters
    const double& get_mut_step() const noexcept {return m_params.p_p.mut_step;}

    ///Returns the number of generations for which the simulation has to run
    const int& get_n_gen() const noexcept {return m_n_generations;}

    ///Returns the number of trials per generation
    int get_n_trials() const noexcept {return m_population.get_n_trials();}

    ///returns const ref to Bernoulli distribution for change freq of A
    const std::bernoulli_distribution& get_t_change_env_distr_A() const noexcept {return m_t_change_env_distr_A;}

    ///returns const ref to Bernoulli distribution for change freq of B
    const std::bernoulli_distribution& get_t_change_env_distr_B() const noexcept {return m_t_change_env_distr_B;}

    ///returns the number of generations the simulation has run for
    const int& get_time() const noexcept {return m_time;}

    ///increases the number of generations the simulation has run for
    void increase_time() {++m_time;}

    ///Returns the strength of selection
    double get_sel_str() const noexcept {return m_sel_str;}

    ///Returns the number of generations after which
    ///selection takes place
    int get_sel_freq() const noexcept {return m_selection_frequency;}

    ///Returns the number of generations for which
    ///selection takes place when selection is 'sporadic'
    int get_sel_duration() const noexcept {return m_selection_duration;}

    ///Returns change frequency of environment/function A
    double get_change_freq_A() const noexcept {return m_change_freq_A;}

    ///Returns change frequency of environment/function B
    double get_change_freq_B() const noexcept {return m_change_freq_B;}

    ///Returns seed
    int get_seed() const noexcept {return m_seed;}

    ///Returns a reference to the vector of individuals
    const std::vector<typename Pop::ind_t> &get_inds() const {return m_population.get_inds();};

    ///Returns the current inputs in the simulation
    const std::vector<double> &get_input() const noexcept {return m_input;}

    ///Returns the current optimal output
    const double &get_optimal() const noexcept {return m_optimal_output;}

    ///Checks if environment needs to change this generation.
    ///'regular' regime: change exactly every 1/change_freq generations;
    ///'stochastic' regime: change with per-generation Bernoulli probability.
    ///Under a symmetrical regime function B uses A's frequency.
    bool is_environment_changing(){

        if(m_time == 0)
            return false;

        // during the adaptation period (first half of the run) the
        // environment never changes
        if constexpr (Adapt_per == adaptation_period::on)
        {
            if(m_time < (m_n_generations / 2))
                return false;
        }

        if constexpr( Env_change_freq == env_change_freq_type::regular)
        {
            if( m_environment.get_name_current_function() == 'A' )
            {
                return std::fmod(get_time(), 1.0/m_change_freq_A) == 0;
            }
            else if (m_environment.get_name_current_function() == 'B')
            {
                // NOTE(review): 'change' is only assigned inside the two
                // branches below; if neither matched it would be read
                // uninitialized.  Safe only as long as the enum has exactly
                // these two values — confirm.
                bool change;
                if constexpr( Env_change_sym == env_change_symmetry_type::asymmetrical)
                {
                    change = std::fmod(get_time(), 1.0/m_change_freq_B) == 0;
                }
                // NOTE(review): plain runtime 'else if' here, whereas the
                // sibling branch uses 'if constexpr' — presumably 'if constexpr'
                // was intended; verify.
                else if(Env_change_sym == env_change_symmetry_type::symmetrical)
                {
                    change = std::fmod(get_time(), 1.0/m_change_freq_A) == 0;
                }
                return change;
            }
        }
        else if( Env_change_freq == env_change_freq_type::stochastic)
        {
            if( m_environment.get_name_current_function() == 'A' )
            {
                std::bernoulli_distribution distro = get_t_change_env_distr_A();
                return distro (get_env_rng());
            }
            else if (m_environment.get_name_current_function() == 'B')
            {
                std::bernoulli_distribution distro;
                if constexpr( Env_change_sym == env_change_symmetry_type::asymmetrical)
                {
                    distro = get_t_change_env_distr_B();
                }
                else if(Env_change_sym == env_change_symmetry_type::symmetrical)
                {
                    distro = get_t_change_env_distr_A();
                }
                return distro (get_env_rng());
            }
            else
                throw std::runtime_error{"invalid current function name"};
        }
        return false;
    }

    ///Returns the function A of the environment
    const std::function<double(std::vector<double>)> &get_env_function_A() const noexcept
    {return get_env().get_env_function_A();}

    ///Returns the number corresponding to the current environmental function
    ///0 for env_function 'A'
    ///1 for env_function 'B'
    int get_number_for_current_env_function() const noexcept
    {return m_environment.get_name_current_function() - 'A';}

    ///Updates the optimal to the given value
    void update_optimal(double new_optimal) {m_optimal_output = new_optimal;}

    ///Updates the inputs of the simulation with new calculated inputs
    void update_inputs(std::vector<double> new_inputs){m_input = new_inputs;}

    ///Gets inputs based on the environment of the simulation
    /// and updates the input stored in simulation.
    ///For plastic networks the current environment's index is appended
    ///as an extra input cue.
    std::vector<double> create_inputs()
    {
        auto inputs = env::create_n_inputs(get_env(),
                                           get_inds_input_size(*this),
                                           get_rng() );
        if constexpr(Resp_type == response_type::plastic)
        {
            inputs.push_back(get_number_for_current_env_function());
        }
        m_input = inputs;
        return inputs;
    }

    ///Evaluates the performance of all individuals in a population:
    ///sums each individual's distance from the target over all trials.
    std::vector<double> evaluate_inds(){

        std::vector<double> cumulative_performance(get_inds().size(), 0);
        std::vector<std::vector<double>> inputs(m_population.get_n_trials());
        std::vector<double> optimals(inputs.size());
        for(int i = 0; i != m_population.get_n_trials(); i++)
        {
            inputs[i] = create_inputs();
            optimals[i] = env::calculate_optimal(m_environment, inputs[i]);
        }

        ///BAD!!! Temporary solutions to pass test
        update_inputs(inputs[0]);
        update_optimal(optimals[0]);

        // each trial's performance vector is accumulated under a critical
        // section because std::transform writes the shared accumulator
        #pragma omp parallel for
        for(int i = 0; i < m_population.get_n_trials(); i++)
        {
            auto performance = pop::calc_dist_from_target(get_inds(), optimals[i], inputs[i]);

            #pragma omp critical
            {
                std::transform(cumulative_performance.begin(), cumulative_performance.end(),
                               performance.begin(),
                               cumulative_performance.begin(),
                               std::plus<double>());
            }
        }

        return cumulative_performance;
    }

    ///Calculates fitness of inds in pop given current env values
    const simulation<Pop, Env_change_sym, Env_change_freq, Sel_Type, Adapt_per>& calc_fitness()
    {
        auto cumulative_performance = evaluate_inds();

        auto fitness_vector = pop::rescale_dist_to_fit(cumulative_performance,
                                                       get_sel_str());

        pop::set_fitness_inds(get_pop(), fitness_vector);

        return *this;
    }

    ///Reproduces inds to next gen based on their fitness
    void reproduce()
    {
        pop::reproduce(get_pop(), get_rng());
    }

    ///Reproduces inds to next gen randomly
    void reproduce_randomly()
    {
        pop::reproduce_random(get_pop(), get_rng());
    }

    ///Calculates fitness and selects a new population based on fitness.
    ///'sporadic' selection applies fitness-based reproduction only during
    ///a window of m_selection_duration generations at the start of every
    ///m_selection_frequency-generation cycle, random reproduction otherwise.
    void select_inds()
    {
        if constexpr(Sel_Type == selection_type::sporadic)
        {
            // during the adaptation period always select on fitness
            if constexpr(Adapt_per == adaptation_period::on)
            {
                if(m_time < m_n_generations / 2)
                {
                    calc_fitness();
                    reproduce();
                    return;
                }
            }
            // NOTE(review): 'm_time % m_selection_frequency >= 0' is always
            // true for non-negative m_time, so only the '< m_selection_duration'
            // comparison restricts the window — confirm this is intended.
            if( m_selection_frequency != 0 &&
                    m_time % m_selection_frequency >= 0 &&
                    m_time % m_selection_frequency < m_selection_duration )
            {
                calc_fitness();
                reproduce();
            }
            else
            {
                calc_fitness();
                reproduce_randomly();
            }
        }
        else if constexpr(Sel_Type == selection_type::constant)
        {
            calc_fitness();
            reproduce();
        }
        else
        {
            throw std::runtime_error{"wrong type of selection"};
        }
    }

    ///Resets the fitness of the population to 0
    void reset_fit_pop()
    {
        m_population.reset_fitness();
    }

    const all_params& get_params() const noexcept {return m_params;}

private:

    environment m_environment;
    Pop m_population;
    int m_n_generations;
    std::mt19937_64 m_rng;
    int m_seed;
    std::bernoulli_distribution m_t_change_env_distr_A;
    std::bernoulli_distribution m_t_change_env_distr_B;
    int m_time = 0;
    double m_sel_str;
    double m_change_freq_A;
    double m_change_freq_B;

    ///Every how many generations individuals are selected
    int m_selection_frequency;

    //For how many generations individuals are selected
    //A tenth of the selection frequency
    int m_selection_duration;

    all_params m_params;

    ///The current inputs that the networks of individuals will receive
    std::vector<double> m_input;

    ///The optimal output at a given moment;
    /// depends on inputs and environmental function
    double m_optimal_output;
};

///Loads a sim object from json
template<class Class>
Class load_json(
        const std::string& filename
        )
{
    std::ifstream f(filename);
    nlohmann::json json_in;
    f >> json_in;
    Class s;
    return s = json_in;
}

///Saves the enitre GODDDAM SIMULATIONNNN!!!!!!! WHOO NEEDS MEMORRYYYY
template<class Class>
void save_json(const Class& s, const std::string& filename)
{
    auto path = std::filesystem::current_path();
    path /= filename;
    if(std::filesystem::exists(path))
    {
        std::cout << "overriding previous results" << std::endl;
    }
    std::ofstream f;
    f.open(filename);
    if(f.is_open())
    {
        nlohmann::json json_out;
        json_out = s;
        f << json_out;
    }
    else
    {
        throw std::runtime_error{"could not open output stream for saving!"};
    }
    f.close();
}

namespace sim
{

///Checks if 2 simulations are equal
template<class Pop>
bool operator ==(const simulation<Pop>& lhs, const simulation<Pop>& rhs);

///Checks if all the individuals in a simulated population have the same input
template<class Sim>
bool all_individuals_have_same_input(const Sim &s)
{
    auto p = s.get_pop();
    return pop::all_individuals_have_same_input(p);
}

///Calculates the optimal output
template<class Sim>
double calculate_optimal(const Sim &s)
{
    return(env::calculate_optimal(s.get_env(), s.get_input()));
}

///Returns a population whose fitness has been calculated
template<class Sim>
typename Sim::pop_t calc_fitness_of_pop(Sim s)
{
    s.update_optimal(env::calculate_optimal(s.get_env(), s.get_input()));
    return pop::calc_fitness(s.get_pop(),
                             s.get_optimal(),
                             s.get_sel_str(),
                             s.get_input());
}

///Calculates the avg_fitness of the population
template<class Sim>
double avg_fitness(const Sim& s)
{
    return pop::avg_fitness(s.get_pop());
}

///Changes all the weights of a given individual to a given value
template<class Sim>
void change_all_weights_nth_ind(Sim& s, size_t ind_index, double new_weight);

///Changes the network of the nth individual for a given network
template<class Pop>
void change_nth_ind_net(simulation<Pop>& s, size_t ind_index, const typename Pop::ind_t::net_t &n)
{
    pop::change_nth_ind_net(s.get_pop(), ind_index, n) ;
}

///Gets const ref to the best n individuals in a pop
template<class Sim>
std::vector<typename Sim::pop_t::ind_t> get_best_n_inds(const Sim& s, int n)
{
    return pop::get_best_n_inds(s.get_pop(), n);
}

///Returns the current optimal function of the environment
template<class Sim>
std::function<double(std::vector<double>)> get_current_env_function(const Sim &s);

///Gets the name of the current environmental function
template<class Sim>
char get_name_current_function(const Sim& s) noexcept
{
    return s.get_env().get_name_current_function();
}

///Returns the fitness of the nth ind in pop
template<class Sim>
double get_nth_ind_fitness(const Sim& s, const size_t ind_index);

///Returns const or non-const ref to the network of the nth individual in the
/// population member of a simulation
template<class Sim>
const typename Sim::pop_t::ind_t::net_t& get_nth_ind_net(const Sim& s, size_t ind_index);

///Switches the function of the environment used to calculate the optimal output
template<class Sim>
void switch_optimal_function(Sim &s)
{
    env::switch_env_function(s.get_env());
}

///Wrapper function; does everything that needs doing when the environment changes
template<class Sim>
void perform_environment_change(Sim &s)
{
    switch_optimal_function(s);
}

///checks if the individuals in the populations from 2 different simulations
///have exactly the same fitness values
template<class Sim>
bool pops_have_same_fitness(const Sim& lhs, const Sim& rhs)
{
    return pop::extract_fitnesses(lhs.get_inds()) == pop::extract_fitnesses(rhs.get_inds());
}

///sums the fitness of all individuals of a simulation together
template<class Sim>
double sum_of_fitnesses(const Sim& s)
{
    auto fitnesses = pop::extract_fitnesses(s.get_inds());
    return std::accumulate(fitnesses.begin(), fitnesses.end(), 0.0);
}

///Ticks time one generation into the future
template<class Sim>
void tick(Sim &s)
{
    if(s.is_environment_changing()){
        perform_environment_change(s);
    }

    s.select_inds();
    s.increase_time();
}

///Calculates the standard deviation of the population fitness
template<class Sim>
double var_fitness(const Sim&s)
{
    return pop::stdev_fitness(s.get_pop());
}

///Get the inputs of the individuals in the simulation. Requires all individuals to have the same input.
template<class Sim>
const std::vector<double> &get_current_input(const Sim &s);

///Returns the input of the nth individual in the population
template<class Sim>
const std::vector<double> &get_nth_individual_input(const Sim &s, const int n);

///Updates the input with the current environmental indicator
template<class Sim>
void update_env_indicator(Sim &s);
}

void test_simulation() noexcept;

#endif // SIMULATION_H
/* ===== file: par_amgdd_fac_cycle.c ===== */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" HYPRE_Int hypre_BoomerAMGDD_FAC( void *amgdd_vdata, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; HYPRE_Int cycle_type = hypre_ParAMGDDDataFACCycleType(amgdd_data); HYPRE_Int start_level = hypre_ParAMGDDDataStartLevel(amgdd_data); if (cycle_type == 1 || cycle_type == 2) { hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, cycle_type, first_iteration); } else if (cycle_type == 3) { hypre_BoomerAMGDD_FAC_FCycle(amgdd_vdata, first_iteration); } else { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: unknown AMG-DD FAC cycle type. Defaulting to 1 (V-cycle).\n"); hypre_ParAMGDDDataFACCycleType(amgdd_data) = 1; hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, 1, first_iteration); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Cycle( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_type, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data); hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data); HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data); HYPRE_Int i; // Relax on the real nodes hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 1); // Restrict the residual at all fine points (real and ghost) and set residual at coarse points not under the fine grid if (num_levels > 1) { hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level + 1], first_iteration); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0); 
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0); // Either solve on the coarse level or recurse if (level + 1 == num_levels - 1) { hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels - 1, 3); } else for (i = 0; i < cycle_type; i++) { hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level + 1, cycle_type, first_iteration); first_iteration = 0; } // Interpolate up and relax hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level + 1]); } hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 2); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_FCycle( void *amgdd_vdata, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data); hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data); HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data); HYPRE_Int level; // ... work down to coarsest ... if (!first_iteration) { for (level = hypre_ParAMGDDDataStartLevel(amgdd_data); level < num_levels - 1; level++) { hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level + 1], 0); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0); } } // ... solve on coarsest level ... hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels - 1, 3); // ... 
and work back up to the finest for (level = num_levels - 2; level > -1; level--) { // Interpolate up and relax hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level + 1]); // V-cycle hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level, 1, 0); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Interpolate( hypre_AMGDDCompGrid *compGrid_f, hypre_AMGDDCompGrid *compGrid_c ) { hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridP(compGrid_f), hypre_AMGDDCompGridU(compGrid_c), 1.0, hypre_AMGDDCompGridU(compGrid_f)); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Restrict( hypre_AMGDDCompGrid *compGrid_f, hypre_AMGDDCompGrid *compGrid_c, HYPRE_Int first_iteration ) { // Recalculate residual on coarse grid if (!first_iteration) { hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridA(compGrid_c), hypre_AMGDDCompGridU(compGrid_c), 1.0, hypre_AMGDDCompGridF(compGrid_c)); } // Get update: s_l <- A_lt_l + s_l hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridA(compGrid_f), hypre_AMGDDCompGridT(compGrid_f), 1.0, hypre_AMGDDCompGridS(compGrid_f)); // If we need to preserve the updates on the next level if (hypre_AMGDDCompGridS(compGrid_c)) { hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridR(compGrid_f), hypre_AMGDDCompGridS(compGrid_f), 0.0, hypre_AMGDDCompGridS(compGrid_c)); // Subtract restricted update from recalculated residual: f_{l+1} <- f_{l+1} - s_{l+1} hypre_AMGDDCompGridVectorAxpy(-1.0, hypre_AMGDDCompGridS(compGrid_c), hypre_AMGDDCompGridF(compGrid_c)); } else { // Restrict and subtract update from recalculated residual: f_{l+1} <- f_{l+1} - P_l^Ts_l hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridR(compGrid_f), hypre_AMGDDCompGridS(compGrid_f), 1.0, hypre_AMGDDCompGridF(compGrid_c)); } // Zero out initial guess on coarse grid hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridU(compGrid_c), 0.0); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Relax( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { 
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_Int numRelax = hypre_ParAMGDDDataFACNumRelax(amgdd_data); HYPRE_Int i; if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorCopy(hypre_AMGDDCompGridU(compGrid), hypre_AMGDDCompGridTemp(compGrid)); hypre_AMGDDCompGridVectorScale(-1.0, hypre_AMGDDCompGridTemp(compGrid)); } for (i = 0; i < numRelax; i++) { (*hypre_ParAMGDDDataUserFACRelaxation(amgdd_data))(amgdd_vdata, level, cycle_param); } if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridU(compGrid), hypre_AMGDDCompGridTemp(compGrid)); if (hypre_AMGDDCompGridT(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridTemp(compGrid), hypre_AMGDDCompGridT(compGrid)); } if (hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridTemp(compGrid), hypre_AMGDDCompGridQ(compGrid)); } } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Jacobi( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid); HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location); if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGDD_FAC_JacobiDevice(amgdd_vdata, level); } else #endif { hypre_BoomerAMGDD_FAC_JacobiHost(amgdd_vdata, level); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_JacobiHost( void *amgdd_vdata, HYPRE_Int level ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_Real relax_weight = 
hypre_ParAMGDDDataFACRelaxWeight(amgdd_data); HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid); hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid); hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid); hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid); hypre_CSRMatrix *diag; HYPRE_Int total_real_nodes; HYPRE_Int i, j; // Calculate l1_norms if necessary (right now, I'm just using this vector for the diagonal of A and doing straight ahead Jacobi) if (!hypre_AMGDDCompGridL1Norms(compGrid)) { total_real_nodes = hypre_AMGDDCompGridNumOwnedNodes(compGrid) + hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); hypre_AMGDDCompGridL1Norms(compGrid) = hypre_CTAlloc(HYPRE_Real, total_real_nodes, memory_location); diag = hypre_AMGDDCompGridMatrixOwnedDiag(A); for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i + 1]; j++) { // hypre_AMGDDCompGridL1Norms(compGrid)[i] += fabs(hypre_CSRMatrixData(diag)[j]); if (hypre_CSRMatrixJ(diag)[j] == i) { hypre_AMGDDCompGridL1Norms(compGrid)[i] = hypre_CSRMatrixData(diag)[j]; } } } diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A); for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i + 1]; j++) { // hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] += fabs(hypre_CSRMatrixData(diag)[j]); if (hypre_CSRMatrixJ(diag)[j] == i) { hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes( compGrid)] = hypre_CSRMatrixData(diag)[j]; } } } } // Allocate temporary vector if necessary if (!hypre_AMGDDCompGridTemp2(compGrid)) { hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate(); hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid), hypre_AMGDDCompGridNumOwnedNodes(compGrid), hypre_AMGDDCompGridNumNonOwnedNodes(compGrid), 
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid)); } hypre_AMGDDCompGridVectorCopy(f, hypre_AMGDDCompGridTemp2(compGrid)); hypre_AMGDDCompGridMatvec(-relax_weight, A, u, relax_weight, hypre_AMGDDCompGridTemp2(compGrid)); for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u))[i] += hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] / hypre_AMGDDCompGridL1Norms(compGrid)[i]; } for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u))[i] += hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] / hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)]; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_GaussSeidel( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid); hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid); hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid); hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A); hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A); hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A); hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A); HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u)); HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u)); HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f)); HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f)); HYPRE_Int i, j; // loop variables HYPRE_Complex diagonal; // placeholder for the diagonal of A // Do 
Gauss-Seidel relaxation on the owned nodes for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { // Initialize u as RHS u_owned_data[i] = f_owned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i + 1]; j++) { if (hypre_CSRMatrixJ(owned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(owned_diag)[j]; } else { u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ( owned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i + 1]; j++) { u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ( owned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n"); } u_owned_data[i] /= diagonal; } // Do Gauss-Seidel relaxation on the nonowned nodes for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { // Initialize u as RHS u_nonowned_data[i] = f_nonowned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i + 1]; j++) { if (hypre_CSRMatrixJ(nonowned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(nonowned_diag)[j]; } else { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ( nonowned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i + 1]; j++) { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ( nonowned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n"); } u_nonowned_data[i] /= diagonal; } return hypre_error_flag; } HYPRE_Int 
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGDD_FAC_OrderedGaussSeidel
 *
 * Gauss-Seidel relaxation on the AMG-DD composite grid of `level`, visiting
 * unknowns in a topologically sorted order.  The orderings for the owned and
 * nonowned unknowns are computed lazily (via hypre_topo_sort on the diag
 * blocks of A) on first call and cached on the composite grid.  The nonowned
 * real nodes are swept first, then the owned nodes.
 *
 * Parameters:
 *   amgdd_vdata - opaque pointer to the hypre_ParAMGDDData object
 *   level       - composite-grid level to relax on
 *   cycle_param - accepted for interface uniformity with the other FAC
 *                 relaxation routines; not used in this function
 *
 * Returns hypre_error_flag.
 * (Return type HYPRE_Int appears immediately before this definition.)
 *--------------------------------------------------------------------------*/
hypre_BoomerAMGDD_FAC_OrderedGaussSeidel( void *amgdd_vdata,
                                          HYPRE_Int level,
                                          HYPRE_Int cycle_param )
{
   hypre_ParAMGDDData        *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
   hypre_AMGDDCompGrid       *compGrid   = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
   hypre_AMGDDCompGridMatrix *A          = hypre_AMGDDCompGridA(compGrid);
   hypre_AMGDDCompGridVector *f          = hypre_AMGDDCompGridF(compGrid);
   hypre_AMGDDCompGridVector *u          = hypre_AMGDDCompGridU(compGrid);

   HYPRE_Int      unordered_i, i, j; // loop variables
   HYPRE_Complex  diagonal;          // placeholder for the diagonal of A

   // Lazily build and cache the relaxation ordering for the owned nodes
   if (!hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid))
   {
      hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid) =
         hypre_CTAlloc(HYPRE_Int,
                       hypre_AMGDDCompGridNumOwnedNodes(compGrid),
                       hypre_AMGDDCompGridMemoryLocation(compGrid));
      hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid),
                      hypre_AMGDDCompGridNumOwnedNodes(compGrid));
   }

   // Lazily build and cache the relaxation ordering for the nonowned nodes
   if (!hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid))
   {
      hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid) =
         hypre_CTAlloc(HYPRE_Int,
                       hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
                       hypre_AMGDDCompGridMemoryLocation(compGrid));
      hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
                      hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid),
                      hypre_AMGDDCompGridNumNonOwnedNodes(compGrid));
   }

   // Get all the info
   HYPRE_Complex *u_owned_data    = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u));
   HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u));
   HYPRE_Complex *f_owned_data    = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f));
   HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f));

   hypre_CSRMatrix *owned_diag    = hypre_AMGDDCompGridMatrixOwnedDiag(A);
   hypre_CSRMatrix *owned_offd    = hypre_AMGDDCompGridMatrixOwnedOffd(A);
   hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
   hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A);

   // Do Gauss-Seidel relaxation on the nonowned real nodes
   for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); unordered_i++)
   {
      // Map position in the sweep to the actual nonowned unknown index
      i = hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid)[unordered_i];

      // Initialize u as RHS
      u_nonowned_data[i] = f_nonowned_data[i];
      diagonal = 0.0;

      // Loop over diag entries: capture the diagonal, subtract off-diagonal couplings
      for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i + 1]; j++)
      {
         if (hypre_CSRMatrixJ(nonowned_diag)[j] == i)
         {
            diagonal = hypre_CSRMatrixData(nonowned_diag)[j];
         }
         else
         {
            u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] *
                                  u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
         }
      }

      // Loop over offd entries (couplings to owned unknowns)
      for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i + 1]; j++)
      {
         u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] *
                               u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
      }

      // Divide by diagonal.  On a zero diagonal a warning is recorded but the
      // division is still performed — same policy as the unordered variant.
      if (diagonal == 0.0)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
      }
      u_nonowned_data[i] /= diagonal;
   }

   // Do Gauss-Seidel relaxation on the owned nodes
   for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); unordered_i++)
   {
      // Map position in the sweep to the actual owned unknown index
      i = hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid)[unordered_i];

      // Initialize u as RHS
      u_owned_data[i] = f_owned_data[i];
      diagonal = 0.0;

      // Loop over diag entries
      for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i + 1]; j++)
      {
         if (hypre_CSRMatrixJ(owned_diag)[j] == i)
         {
            diagonal = hypre_CSRMatrixData(owned_diag)[j];
         }
         else
         {
            u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] *
                               u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ];
         }
      }

      // Loop over offd entries (couplings to nonowned unknowns)
      for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i + 1]; j++)
      {
         u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] *
                            u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ];
      }

      // Divide by diagonal (warn on zero; division still performed)
      if (diagonal == 0.0)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
      }
      u_owned_data[i] /= diagonal;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGDD_FAC_CFL1Jacobi
 *
 * Dispatcher for CF-ordered L1-Jacobi relaxation.  `cycle_param` selects the
 * C/F sweep order: 1 -> C then F, 2 -> F then C, anything else -> F only
 * (relax_set +1 denotes C points, -1 denotes F points; see the Host routine
 * below).  When built with CUDA/HIP support, work is routed to the device
 * implementation if the composite grid's memory location resolves to device
 * execution; otherwise the host implementation is used.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1Jacobi( void *amgdd_vdata,
                                  HYPRE_Int level,
                                  HYPRE_Int cycle_param )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_ParAMGDDData    *amgdd_data      = (hypre_ParAMGDDData*) amgdd_vdata;
   hypre_AMGDDCompGrid   *compGrid        = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
   HYPRE_MemoryLocation   memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
   HYPRE_ExecutionPolicy  exec            = hypre_GetExecPolicy1(memory_location);

   if (exec == HYPRE_EXEC_DEVICE)
   {
      if (cycle_param == 1)
      {
         // Down-cycle: relax C points, then F points
         hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
         hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, -1);
      }
      else if (cycle_param == 2)
      {
         // Up-cycle: relax F points, then C points
         hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, -1);
         hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
      }
      else
      {
         // Coarsest level / default: F points only
         hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, -1);
      }
   }
   else
#endif
   {
      if (cycle_param == 1)
      {
         // Down-cycle: relax C points, then F points
         hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
         hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, -1);
      }
      else if (cycle_param == 2)
      {
         // Up-cycle: relax F points, then C points
         hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, -1);
         hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
      }
      else
      {
         // Coarsest level / default: F points only
         hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, -1);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGDD_FAC_CFL1JacobiHost
 *
 * Host implementation of one weighted L1-Jacobi sweep restricted to points
 * whose CF marker equals `relax_set`.  The current iterate is copied into
 * the cached Temp2 vector (created on first call), so all residuals are
 * computed against the old iterate (Jacobi, not Gauss-Seidel).  The update
 * is u_i += relax_weight * res_i / l1_norms[i].
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1JacobiHost( void *amgdd_vdata,
                                      HYPRE_Int level,
                                      HYPRE_Int relax_set )
{
   hypre_ParAMGDDData  *amgdd_data   = (hypre_ParAMGDDData*) amgdd_vdata;
   hypre_AMGDDCompGrid *compGrid     = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
   HYPRE_Real           relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data);

   hypre_CSRMatrix *owned_diag    = hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *owned_offd    = hypre_AMGDDCompGridMatrixOwnedOffd(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(hypre_AMGDDCompGridA(compGrid));

   HYPRE_Complex *owned_u    = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridU(compGrid)));
   HYPRE_Complex *nonowned_u = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridU(compGrid)));
   HYPRE_Complex *owned_f    = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridF(compGrid)));
   HYPRE_Complex *nonowned_f = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridF(compGrid)));

   HYPRE_Real *l1_norms  = hypre_AMGDDCompGridL1Norms(compGrid);      // per-row L1 scaling
   HYPRE_Int  *cf_marker = hypre_AMGDDCompGridCFMarkerArray(compGrid);// owned entries first, then nonowned

   HYPRE_Complex *owned_tmp;    // snapshot of owned u (old iterate)
   HYPRE_Complex *nonowned_tmp; // snapshot of nonowned u (old iterate)

   HYPRE_Int  i, j;
   HYPRE_Real res;

   /*-----------------------------------------------------------------
    * Create and initialize Temp2 vector if not done before.
    *-----------------------------------------------------------------*/
   if (!hypre_AMGDDCompGridTemp2(compGrid))
   {
      hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate();
      hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid),
                                          hypre_AMGDDCompGridNumOwnedNodes(compGrid),
                                          hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
                                          hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid));
   }
   owned_tmp    = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)));
   nonowned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)));

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
   {
      owned_tmp[i] = owned_u[i];
   }

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedNodes(compGrid); i++)
   {
      nonowned_tmp[i] = nonowned_u[i];
   }

   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_points.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
   {
      if (cf_marker[i] == relax_set)
      {
         // Residual against the old iterate (owned_tmp / nonowned_tmp)
         res = owned_f[i];
         for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i + 1]; j++)
         {
            res -= hypre_CSRMatrixData(owned_diag)[j] * owned_tmp[ hypre_CSRMatrixJ(owned_diag)[j] ];
         }
         for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i + 1]; j++)
         {
            res -= hypre_CSRMatrixData(owned_offd)[j] * nonowned_tmp[ hypre_CSRMatrixJ(owned_offd)[j] ];
         }
         owned_u[i] += (relax_weight * res) / l1_norms[i];
      }
   }

   // NOTE(review): unlike the owned-node loop above, this loop carries no
   // OpenMP pragma in the original — confirm whether that is intentional.
   for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
   {
      // Nonowned CF markers and L1 norms are stored after the owned entries
      if (cf_marker[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] == relax_set)
      {
         res = nonowned_f[i];
         for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i + 1]; j++)
         {
            res -= hypre_CSRMatrixData(nonowned_diag)[j] * nonowned_tmp[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
         }
         for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i + 1]; j++)
         {
            res -= hypre_CSRMatrixData(nonowned_offd)[j] * owned_tmp[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
         }
         nonowned_u[i] += (relax_weight * res) / l1_norms[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)];
      }
   }

   return hypre_error_flag;
}
ep-brisbane.c
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB EP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// //-------------------------------------------------------------------- // program EMBAR //-------------------------------------------------------------------- // This is the serial version of the APP Benchmark 1, // the "embarassingly parallel" benchmark. // // // M is the Log_2 of the number of complex pairs of uniform (0, 1) random // numbers. MK is the Log_2 of the size of each batch of uniform random // numbers. MK can be set for convenience on a given system, since it does // not affect the results. 
//--------------------------------------------------------------------
#include <brisbane/brisbane.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "type.h"
#include "npbparams.h"
//#include "randdp.h"
#include "timers.h"
#include "print_results.h"
//#include "accelmath.h"

// INLINE resolves to nothing / inline / static inline under the SPEC knobs.
#ifdef SPEC_NO_INLINE
#define INLINE
#else
#ifdef SPEC_NO_STATIC_INLINE
#define INLINE inline
#else
#define INLINE static inline
#endif
#endif

#define MAX(X,Y) (((X) > (Y)) ? (X) : (Y))

//#define MK 16
//#define MM (M - MK)
//#define NN (1 << MM)
//#define NK (1 << MK)
//#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0

// Number of random-number batches processed per device task launch.
#ifndef SPEC_BLOCK_SIZE
#define BLKSIZE 1792
#else
#define BLKSIZE SPEC_BLOCK_SIZE
#endif

// Constants for the 46-bit linear congruential generator:
// r23 = 2^-23, r46 = 2^-46, t23 = 2^23, t46 = 2^46.
#define r23 1.1920928955078125e-07
#define r46 r23 * r23
#define t23 8.388608e+06
#define t46 t23 * t23

#pragma omp declare target
INLINE double randlc_ep( double *x, double a )
{
  //--------------------------------------------------------------------
  // This routine returns a uniform pseudorandom double precision number
  // in the range (0, 1) by using the linear congruential generator
  //
  //   x_{k+1} = a x_k (mod 2^46)
  //
  // where 0 < x_k < 2^46 and 0 < a < 2^46.  This scheme generates 2^44
  // numbers before repeating.  The argument A is the same as 'a' in the
  // above formula, and X is the same as x_0.  A and X must be odd double
  // precision integers in the range (1, 2^46).  The returned value
  // randlc_ep is normalized to be between 0 and 1, i.e.
  // randlc_ep = 2^(-46) * x_1.  X is updated to contain the new seed x_1,
  // so that subsequent calls to randlc_ep using the same arguments will
  // generate a continuous sequence.
  //
  // This routine should produce the same results on any computer with at
  // least 48 mantissa bits in double precision floating point data.  On
  // 64 bit systems, double precision should be disabled.
  //
  // David H. Bailey     October 26, 1990
  //--------------------------------------------------------------------

  // r23 = pow(0.5, 23.0);
  //// pow(0.5, 23.0) = 1.1920928955078125e-07
  // r46 = r23 * r23;
  // t23 = pow(2.0, 23.0);
  //// pow(2.0, 23.0) = 8.388608e+06
  // t46 = t23 * t23;
  /*
  const double r23 = 1.1920928955078125e-07;
  const double r46 = r23 * r23;
  const double t23 = 8.388608e+06;
  const double t46 = t23 * t23;
  */
  double t1, t2, t3, t4, a1, a2, x1, x2, z;
  double r;

  //--------------------------------------------------------------------
  // Break A into two parts such that A = 2^23 * A1 + A2.
  //--------------------------------------------------------------------
  t1 = r23 * a;
  a1 = (int) t1;
  a2 = a - t23 * a1;

  //--------------------------------------------------------------------
  // Break X into two parts such that X = 2^23 * X1 + X2, compute
  // Z = A1 * X2 + A2 * X1  (mod 2^23), and then
  // X = 2^23 * Z + A2 * X2  (mod 2^46).
  //--------------------------------------------------------------------
  t1 = r23 * (*x);
  x1 = (int) t1;
  x2 = *x - t23 * x1;
  t1 = a1 * x2 + a2 * x1;
  t2 = (int) (r23 * t1);
  z = t1 - t23 * t2;
  t3 = t23 * z + a2 * x2;
  t4 = (int) (r46 * t3);
  *x = t3 - t46 * t4;
  r = r46 * (*x);

  return r;
}
#pragma omp end declare target

// EP "embarrassingly parallel" benchmark driver: generates 2^(m+1) uniform
// pseudorandom numbers in blocks, converts pairs to Gaussian deviates on the
// device via Brisbane tasks, and verifies the accumulated sums against
// reference values for the known problem classes.
int main()
{
  brisbane_init(NULL, NULL, true);

  double Mops, t1, t2, t3, t4, x1, x2;
  double sx, sy, tm, an, tt, gc;
  double sx_verify_value, sy_verify_value, sx_err, sy_err;
  int    np;
  int    i, ik, kk, l, k, nit;
  int    j;
  int    verified, timers_enabled;
  double q0, q1, q2, q3, q4, q5, q6, q7, q8, q9;
  double *x;          // seed block of 2*nk random numbers
  double *q;          // annulus counts (nq bins)
  double *xx, *qq;    // per-batch copies: blksize x (2*nk) and blksize x nq
  /*variables for inlining vranlc()*/
  double in_t1, in_t2, in_t3, in_t4;
  double in_a1, in_a2, in_x1, in_x2, in_z;
  double tmp_sx, tmp_sy;
  double dum[3] = {1.0, 1.0, 1.0};
  char   size[16];
  int    blksize = BLKSIZE;
  int    blk, koff, numblks;
  int    m, mk, mm, nn, nk, nq;
  char   xclass;
  FILE  *fp;

  // Presence of timer.flag enables the optional timing report
  if ((fp = fopen("timer.flag", "r")) == NULL) {
    timers_enabled = 0;
  } else {
    timers_enabled = 1;
    fclose(fp);
  }

  // Optional runtime override of problem size (m) and class from ep.input
  if ((fp = fopen("ep.input", "r")) != NULL) {
    int result;
    printf(" Reading from input file ep.input\n");
    result = fscanf(fp, "%d", &m);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%c", &xclass);
    while (fgetc(fp) != '\n');
    fclose(fp);
  } else {
    printf(" No input file. Using compiled defaults \n");
    m = M;
    xclass = CLASS;
  }

  mk = 16;            // log2 of batch size
  mm = m - mk;
  nk = (1 << mk);     // numbers per batch
  np = (1 << mm);     // number of batches
  nq = 10;            // number of annulus bins

  if (np < blksize) {
    blksize = np;
  }
  numblks = ceil( (double)np / (double) blksize);

  x  = (double*)malloc(2*nk*sizeof(double));
  xx = (double*)malloc(blksize*2*nk*sizeof(double));
  q  = (double*)malloc(nq*sizeof(double));
  qq = (double*)malloc(blksize*nq*sizeof(double));

  //--------------------------------------------------------------------
  // Because the size of the problem is too large to store in a 32-bit
  // integer for some classes, we put it into a string (for printing).
  // Have to strip off the decimal point put in there by the floating
  // point print statement (internal file)
  //--------------------------------------------------------------------
  // NOTE(review): uses the compile-time macro M here even when m was read
  // from ep.input — confirm whether the printed size should use m instead.
  sprintf(size, "%15.0lf", pow(2.0, M+1));
  j = 14;
  if (size[j] == '.') j--;
  size[j+1] = '\0';
  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OPENMP-C) - EP Benchmark\n");
  printf("\n Number of random numbers generated: %15s\n", size);

  verified = 0;

  //--------------------------------------------------------------------
  // Compute the number of "batches" of random number pairs generated
  // per processor. Adjust if the number of processors does not evenly
  // divide the total number
  //--------------------------------------------------------------------

  //--------------------------------------------------------------------
  // Call the random number generator functions and initialize
  // the x-array to reduce the effects of paging on the timings.
  // Also, call all mathematical functions that are used. Make
  // sure these initializations cannot be eliminated as dead code.
  //--------------------------------------------------------------------
  printf("[%s:%d] np[%d] nk[%d] blksize[%d] nq[%d]\n", __FILE__, __LINE__, np, nk, blksize, nq);

  // Device buffers mirroring x, xx, q, qq
  brisbane_mem mem_x;
  brisbane_mem mem_xx;
  brisbane_mem mem_q;
  brisbane_mem mem_qq;
  brisbane_mem_create(2 * nk * sizeof(double), &mem_x);
  brisbane_mem_create(blksize * 2 * nk * sizeof(double), &mem_xx);
  brisbane_mem_create(nq * sizeof(double), &mem_q);
  brisbane_mem_create(blksize * nq * sizeof(double), &mem_qq);

#pragma omp target data map(alloc:x[0:2*nk],xx[0:blksize*2*nk],qq[0:blksize*nq]) map(from:q[0:nq])
  {
    dum[0] = randlc_ep(&dum[1], dum[2]);

    // Task 0: initialize x on the device (replaces the commented OpenMP loop)
    size_t kernel_init_x_off[1] = { 0 };
    size_t kernel_init_x_idx[1] = { 2 * nk };
    brisbane_kernel kernel_init_x;
    brisbane_kernel_create("init_x", &kernel_init_x);
    brisbane_kernel_setmem(kernel_init_x, 0, mem_x, brisbane_wr);
    brisbane_task task0;
    brisbane_task_create(&task0);
    brisbane_task_kernel(task0, kernel_init_x, 1, kernel_init_x_off, kernel_init_x_idx);
    brisbane_task_submit(task0, brisbane_gpu, NULL, true);
    /*
    #pragma omp target teams distribute parallel for simd map(x[:0])
    for (i = 0; i < 2 * nk; i++) {
      x[i] = -1.0e99;
    }
    */

    // Task 1: zero q on the device
    size_t kernel_init_q_off[1] = { 0 };
    size_t kernel_init_q_idx[1] = { nq };
    brisbane_kernel kernel_init_q;
    brisbane_kernel_create("init_q", &kernel_init_q);
    brisbane_kernel_setmem(kernel_init_q, 0, mem_q, brisbane_wr);
    brisbane_task task1;
    brisbane_task_create(&task1);
    brisbane_task_kernel(task1, kernel_init_q, 1, kernel_init_q_off, kernel_init_q_idx);
    brisbane_task_submit(task1, brisbane_gpu, NULL, true);
    /*
    #pragma omp target teams distribute parallel for simd map(q[:0])
    for (i = 0; i < nq; i++) {
      q[i] = 0.0;
    }
    */

    // Touch the math functions so they cannot be eliminated as dead code
    Mops = log(sqrt(fabs(MAX(1.0, 1.0))));

    timer_clear(0);
    timer_clear(1);
    timer_clear(2);
    timer_start(0);

    /*this function actullay does nothing, so comment it*/
    //vranlc(0, &t1, A, x);
    //#pragma omp target update to(x[0:2*NK])

    //--------------------------------------------------------------------
    // Compute AN = A ^ (2 * NK) (mod 2^46).
    //--------------------------------------------------------------------
    t1 = A;
    for (i = 0; i < mk + 1; i++) {
      t2 = randlc_ep(&t1, t1);
    }
    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;

    // Process the np batches in chunks of blksize
    for (blk=0; blk < numblks; ++blk) {
      koff = blk*blksize;
      if (koff + blksize > np) {
        // Last chunk may be short
        blksize = np - (blk*blksize);
      }

      // Task 2: per-batch init — zero qq, replicate x into each row of xx
      size_t kernel_qq_xx_off[1] = { 0 };
      size_t kernel_qq_xx_idx[1] = { blksize };
      brisbane_kernel kernel_qq_xx;
      brisbane_kernel_create("qq_xx", &kernel_qq_xx);
      brisbane_kernel_setmem(kernel_qq_xx, 0, mem_qq, brisbane_wr);
      brisbane_kernel_setmem(kernel_qq_xx, 1, mem_xx, brisbane_wr);
      brisbane_kernel_setmem(kernel_qq_xx, 2, mem_x, brisbane_rd);
      brisbane_kernel_setarg(kernel_qq_xx, 3, sizeof(int), &nq);
      brisbane_kernel_setarg(kernel_qq_xx, 4, sizeof(int), &nk);
      brisbane_task task2;
      brisbane_task_create(&task2);
      brisbane_task_kernel(task2, kernel_qq_xx, 1, kernel_qq_xx_off, kernel_qq_xx_idx);
      brisbane_task_submit(task2, brisbane_gpu, NULL, true);
      /*
      #pragma omp target teams distribute parallel for map(x[:0],xx[:0],qq[:0])
      for(k=0; k<blksize; k++) {
        #pragma omp simd
        for(i=0; i<nq; i++)
          qq[k*nq + i] = 0.0;
        #pragma omp simd
        for(i=0; i<2*nk; i++)
          xx[k*2*nk + i] = x[i];
      }
      */

      //--------------------------------------------------------------------
      // Each instance of this loop may be performed independently. We compute
      // the k offsets separately to take into account the fact that some nodes
      // have more numbers to generate than others
      //--------------------------------------------------------------------
      // Task 3: main kernel — per-batch random generation + acceptance-
      // rejection Gaussian deviates, with sum reductions into sx/sy.
      brisbane_mem mem_sx;
      brisbane_mem mem_sy;
      brisbane_mem_create(sizeof(double), &mem_sx);
      brisbane_mem_create(sizeof(double), &mem_sy);
      brisbane_mem_reduce(mem_sx, brisbane_sum, brisbane_double);
      brisbane_mem_reduce(mem_sy, brisbane_sum, brisbane_double);
      size_t kernel_core_off[1] = { 0 };
      size_t kernel_core_idx[1] = { blksize };
      brisbane_kernel kernel_core;
      brisbane_kernel_create("core", &kernel_core);
      brisbane_kernel_setmem(kernel_core, 0, mem_xx, brisbane_rdwr);
      brisbane_kernel_setmem(kernel_core, 1, mem_qq, brisbane_rdwr);
      brisbane_kernel_setarg(kernel_core, 2, sizeof(int), &koff);
      brisbane_kernel_setarg(kernel_core, 3, sizeof(double), &an);
      brisbane_kernel_setarg(kernel_core, 4, sizeof(int), &nk);
      brisbane_kernel_setarg(kernel_core, 5, sizeof(int), &blksize);
      brisbane_kernel_setarg(kernel_core, 6, sizeof(int), &nq);
      // NOTE(review): argument index 8 is skipped below (7 then 9) —
      // confirm against the "core" kernel's signature whether the reduction
      // mem arguments intentionally occupy two slots each.
      brisbane_kernel_setmem(kernel_core, 7, mem_sx, brisbane_rdwr);
      brisbane_kernel_setmem(kernel_core, 9, mem_sy, brisbane_rdwr);
      brisbane_task task3;
      brisbane_task_create(&task3);
      brisbane_task_kernel(task3, kernel_core, 1, kernel_core_off, kernel_core_idx);
      brisbane_task_d2h(task3, mem_sx, 0, sizeof(double), &sx);
      brisbane_task_d2h(task3, mem_sy, 0, sizeof(double), &sy);
      brisbane_task_submit(task3, brisbane_gpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for map(tofrom: sx,sy) private(i,t1,t2,t3,l,kk,ik,in_t1,in_t2,in_t3,in_t4,in_a1,in_a2,in_x1,in_x2,x1,x2,t4,in_z,tmp_sx,tmp_sy) shared(koff,an,xx,nk,blksize,qq,nq) default(none) reduction(+:sx,sy) map(xx[:0],qq[:0])
      for (k = 0; k < blksize; k++) {
        kk = k + koff;
        t1 = S;
        t2 = an;

        // Find starting seed t1 for this kk.
        for (i = 1; i <= 100; i++) {
          ik = kk / 2;
          if ((2 * ik) != kk) t3 = randlc_ep(&t1, t2);
          if (ik == 0) break;
          t3 = randlc_ep(&t2, t2);
          kk = ik;
        }

        //--------------------------------------------------------------------
        // Compute uniform pseudorandom numbers.
        //--------------------------------------------------------------------
        //vranlc(2 * NK, &t1, A, x);
        /*inline vranlc function*/
        in_t1 = r23 * A;
        in_a1 = (int)in_t1;
        in_a2 = A - t23 * in_a1;
        for(i=0; i<2*nk; i++) {
          in_t1 = r23 * t1;
          in_x1 = (int)in_t1;
          in_x2 = t1 - t23 * in_x1;
          in_t1 = in_a1 * in_x2 + in_a2 * in_x1;
          in_t2 = (int)(r23 * in_t1);
          in_z = in_t1 - t23 * in_t2;
          in_t3 = t23*in_z + in_a2 *in_x2;
          in_t4 = (int)(r46 * in_t3);
          t1 = in_t3 - t46 * in_t4;
          xx[k*2*nk + i] = r46 * t1;
        }

        //--------------------------------------------------------------------
        // Compute Gaussian deviates by acceptance-rejection method and
        // tally counts in concentri//square annuli. This loop is not
        // vectorizable.
        //--------------------------------------------------------------------
        //if (timers_enabled) timer_start(1);
        tmp_sx = 0.0;
        tmp_sy = 0.0;
        for (i = 0; i < nk; i++) {
          x1 = 2.0 * xx[k*2*nk + 2*i] - 1.0;
          x2 = 2.0 * xx[k*2*nk + (2*i+1)] - 1.0;
          t1 = x1 * x1 + x2 * x2;
          if (t1 <= 1.0) {
            t2 = sqrt(-2.0 * log(t1) / t1);
            t3 = (x1 * t2);
            t4 = (x2 * t2);
            l = MAX(fabs(t3), fabs(t4));
            qq[k*nq + l] += 1.0;
            tmp_sx = tmp_sx + t3;
            tmp_sy = tmp_sy + t4;
          }
        }
        sx += tmp_sx;
        sy += tmp_sy;
      }
#endif

      // Task 4: reduce qq columns into q and accumulate the grand count gc
      brisbane_mem mem_gc;
      brisbane_mem_create(sizeof(double), &mem_gc);
      brisbane_mem_reduce(mem_gc, brisbane_sum, brisbane_double);
      size_t kernel_gc_off[1] = { 0 };
      size_t kernel_gc_idx[1] = { nq };
      brisbane_kernel kernel_gc;
      brisbane_kernel_create("gc", &kernel_gc);
      brisbane_kernel_setmem(kernel_gc, 0, mem_qq, brisbane_rd);
      brisbane_kernel_setmem(kernel_gc, 1, mem_q, brisbane_rdwr);
      brisbane_kernel_setarg(kernel_gc, 2, sizeof(int), &blksize);
      brisbane_kernel_setarg(kernel_gc, 3, sizeof(int), &nq);
      brisbane_kernel_setmem(kernel_gc, 4, mem_gc, brisbane_rdwr);
      brisbane_task task4;
      brisbane_task_create(&task4);
      brisbane_task_kernel(task4, kernel_gc, 1, kernel_gc_off, kernel_gc_idx);
      brisbane_task_d2h(task4, mem_q, 0, nq * sizeof(double), q);
      brisbane_task_d2h(task4, mem_gc, 0, sizeof(double), &gc);
      brisbane_task_submit(task4, brisbane_gpu, NULL, true);
#if 0
#pragma omp target teams distribute map(tofrom: gc) reduction(+:gc) map(qq[:0],q[:0])
      for(i=0; i<nq; i++) {
        double sum_qi = 0.0;
#pragma omp parallel for simd reduction(+:sum_qi)
        for(k=0; k<blksize; k++)
          sum_qi = sum_qi + qq[k*nq + i]; /*sum of each column of qq/q[i] */
        q[i] += sum_qi; /*final sum of q*/
        gc += sum_qi;
      }
#endif
    }//end for
  }/*end omp data*/

  timer_stop(0);
  tm = timer_read(0);

  nit = 0;
  verified = 1;
  // Reference sums for the known problem sizes (classes S..E)
  if (m == 24) {
    sx_verify_value = -3.247834652034740e+3;
    sy_verify_value = -6.958407078382297e+3;
  } else if (m == 25) {
    sx_verify_value = -2.863319731645753e+3;
    sy_verify_value = -6.320053679109499e+3;
  } else if (m == 28) {
    sx_verify_value = -4.295875165629892e+3;
    sy_verify_value = -1.580732573678431e+4;
  } else if (m == 30) {
    sx_verify_value = 4.033815542441498e+4;
    sy_verify_value = -2.660669192809235e+4;
  } else if (m == 32) {
    sx_verify_value = 4.764367927995374e+4;
    sy_verify_value = -8.084072988043731e+4;
  } else if (m == 36) {
    sx_verify_value = 1.982481200946593e+5;
    sy_verify_value = -1.020596636361769e+5;
  } else if (m == 40) {
    sx_verify_value = -5.319717441530e+05;
    sy_verify_value = -3.688834557731e+05;
  } else {
    verified = 0;
  }

  // Relative-error check of the accumulated sums against the reference
  if (verified) {
    sx_err = fabs((sx - sx_verify_value) / sx_verify_value);
    sy_err = fabs((sy - sy_verify_value) / sy_verify_value);
    verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON));
  }

  Mops = pow(2.0, m+1) / tm / 1000000.0;

  printf("\nEP Benchmark Results:\n\n");
#ifndef SPEC
  printf("CPU Time =%10.4lf\n", tm);
#endif
  // NOTE(review): prints the compile-time macro M, not the runtime m — confirm.
  printf("N = 2^%5d\n", M);
  printf("No. Gaussian Pairs = %15.0lf\n", gc);
  printf("Sums = %25.15lE %25.15lE\n", sx, sy);
  printf("Counts: \n");
  for (i = 0; i < nq; i++) {
    printf("%3d%15.0lf\n", i, q[i]);
  }

  print_results("EP", xclass, m+1, 0, 0, nit, tm, Mops,
                "Random numbers generated", verified, NPBVERSION,
                COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  // Optional timing breakdown (timers 1 and 2 are cleared but never started
  // in this device version, so they report zero)
  if (timers_enabled) {
    if (tm <= 0.0) tm = 1.0;
    tt = timer_read(0);
    printf("\nTotal time:     %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
    tt = timer_read(1);
    printf("Gaussian pairs: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
    tt = timer_read(2);
    printf("Random numbers: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
  }

  free(x);
  free(q);
  free(xx);
  free(qq);

  brisbane_finalize();

  return 0;
}
ewald.h
#ifndef ewald_h
#define ewald_h
#include "logger.h"
#include "types.h"

namespace exafmm {
  //! Ewald summation: splits the periodic Coulomb sum into a short-range
  //! real-space part (erfc-screened pairwise kernel, tree traversal) and a
  //! long-range wave part (Gaussian-filtered DFT over a sphere of wave numbers).
  class Ewald {
    //! Wave structure for Ewald summation
    struct Wave {
      vec3   K;                                                 //!< 3-D wave number vector
      real_t REAL;                                              //!< real part of wave
      real_t IMAG;                                              //!< imaginary part of wave
    };
    typedef std::vector<Wave> Waves;                            //!< Vector of Wave types
    typedef Waves::iterator W_iter;                             //!< Iterator of Wave types

  private:
    const int ksize;                                            //!< Number of waves in Ewald summation
    const real_t alpha;                                         //!< Scaling parameter for Ewald summation
    const real_t sigma;                                         //!< Scaling parameter for Ewald summation
    const real_t cutoff;                                        //!< Cutoff distance
    const vec3 cycle;                                           //!< Periodic cycle

  private:
    //! Forward DFT: accumulate each wave's amplitude from all source bodies
    void dft(Waves & waves, Bodies & bodies) const {
      vec3 scale;
      for (int d=0; d<3; d++) scale[d]= 2 * M_PI / cycle[d];    // Scale conversion
#pragma omp parallel for
      for (int w=0; w<int(waves.size()); w++) {                 // Loop over waves
        W_iter W=waves.begin()+w;                               //  Wave iterator
        W->REAL = W->IMAG = 0;                                  //  Initialize waves
        for (B_iter B=bodies.begin(); B!=bodies.end(); B++) {   //  Loop over bodies
          real_t th = 0;                                        //   Initialize phase
          for (int d=0; d<3; d++) th += W->K[d] * B->X[d] * scale[d];// Determine phase
          W->REAL += B->SRC * std::cos(th);                     //   Accumulate real component
          W->IMAG += B->SRC * std::sin(th);                     //   Accumulate imaginary component
        }                                                       //  End loop over bodies
      }                                                         // End loop over waves
    }

    //! Inverse DFT: evaluate potential and force at each target body
    void idft(Waves & waves, Bodies & bodies) const {
      vec3 scale;
      for (int d=0; d<3; d++) scale[d] = 2 * M_PI / cycle[d];   // Scale conversion
#pragma omp parallel for
      for (int b=0; b<int(bodies.size()); b++) {                // Loop over bodies
        B_iter B=bodies.begin()+b;                              //  Body iterator
        kvec4 TRG = kreal_t(0);                                 //  Initialize target values
        for (W_iter W=waves.begin(); W!=waves.end(); W++) {     //  Loop over waves
          real_t th = 0;                                        //   Initialize phase
          for (int d=0; d<3; d++) th += W->K[d] * B->X[d] * scale[d];// Determine phase
          real_t dtmp = W->REAL * std::sin(th) - W->IMAG * std::cos(th);// Temporary value
          TRG[0] += W->REAL * std::cos(th) + W->IMAG * std::sin(th);// Accumulate potential
          for (int d=0; d<3; d++) TRG[d+1] -= dtmp * W->K[d];   //   Accumulate force
        }                                                       //  End loop over waves
        for (int d=0; d<3; d++) TRG[d+1] *= scale[d];           //  Scale forces
        B->TRG += TRG;                                          //  Copy results to bodies
      }                                                         // End loop over bodies
    }

    //! Initialize wave vector: all integer (l,m,n) with 0 < |k|^2 <= ksize^2,
    //! restricted to a half-space to avoid counting conjugate waves twice
    Waves initWaves() const {
      Waves waves;                                              // Initialize wave vector
      int kmaxsq = ksize * ksize;                               // kmax squared
      int kmax = ksize;                                         // kmax as integer
      for (int l=0; l<=kmax; l++) {                             // Loop over x component
        int mmin = -kmax;                                       //  Determine minimum y component
        if (l==0) mmin = 0;                                     //  Exception for minimum y component
        for (int m=mmin; m<=kmax; m++) {                        //  Loop over y component
          int nmin = -kmax;                                     //   Determine minimum z component
          if (l==0 && m==0) nmin=1;                             //   Exception for minimum z component
          for (int n=nmin; n<=kmax; n++) {                      //   Loop over z component
            real_t ksq = l * l + m * m + n * n;                 //    Wave number squared
            if (ksq <= kmaxsq) {                                //    If wave number is below kmax
              Wave wave;                                        //     Initialize wave structure
              wave.K[0] = l;                                    //     x component of k
              wave.K[1] = m;                                    //     y component of k
              wave.K[2] = n;                                    //     z component of k
              wave.REAL = wave.IMAG = 0;                        //     Initialize amplitude
              waves.push_back(wave);                            //     Push wave to vector
            }                                                   //    End if for wave number
          }                                                     //   End loop over z component
        }                                                       //  End loop over y component
      }                                                         // End loop over x component
      return waves;                                             // Return wave vector
    }

    //! Ewald real part P2P kernel: erfc-screened pairwise interaction within cutoff
    void P2P(C_iter Ci, C_iter Cj, vec3 Xperiodic) const {
      for (B_iter Bi=Ci->BODY; Bi!=Ci->BODY+Ci->NBODY; Bi++) {  // Loop over target bodies
        for (B_iter Bj=Cj->BODY; Bj!=Cj->BODY+Cj->NBODY; Bj++) {//  Loop over source bodies
          vec3 dX = Bi->X - Bj->X - Xperiodic;                  //   Distance vector from source to target
          real_t R2 = norm(dX);                                 //   R^2
          if (0 < R2 && R2 < cutoff * cutoff) {                 //   Exclude self interaction and cutoff
            real_t R2s = R2 * alpha * alpha;                    //    (R * alpha)^2
            real_t Rs = std::sqrt(R2s);                         //    R * alpha
            real_t invRs = 1 / Rs;                              //    1 / (R * alpha)
            real_t invR2s = invRs * invRs;                      //    1 / (R * alpha)^2
            real_t invR3s = invR2s * invRs;                     //    1 / (R * alpha)^3
            real_t dtmp = Bj->SRC * (M_2_SQRTPI * std::exp(-R2s) * invR2s + erfc(Rs) * invR3s);
            dtmp *= alpha * alpha * alpha;                      //    Scale temporary value
            Bi->TRG[0] += Bj->SRC * erfc(Rs) * invRs * alpha;   //    Ewald real potential
            Bi->TRG[1] -= dX[0] * dtmp;                         //    x component of Ewald real force
            Bi->TRG[2] -= dX[1] * dtmp;                         //    y component of Ewald real force
            Bi->TRG[3] -= dX[2] * dtmp;                         //    z component of Ewald real force
          }                                                     //   End if for self interaction
        }                                                       //  End loop over source bodies
      }                                                         // End loop over target bodies
    }

    //! Recursive functor for traversing tree to find neighbors
    struct Neighbor {
      Ewald * ewald;                                            //!< Ewald object
      C_iter Ci;                                                //!< Iterator of current target cell
      C_iter Cj;                                                //!< Iterator of current source cell
      C_iter C0;                                                //!< Iterator of first source cell
      Neighbor(Ewald * _ewald, C_iter _Ci, C_iter _Cj, C_iter _C0) :// Constructor
        ewald(_ewald), Ci(_Ci), Cj(_Cj), C0(_C0) {}             // Initialize variables
      void operator() () {                                      // Overload operator()
        vec3 dX = Ci->X - Cj->X;                                //  Distance vector from source to target
        wrap(dX, ewald->cycle);                                 //  Wrap around periodic domain
        vec3 Xperiodic = Ci->X - Cj->X - dX;                    //  Coordinate offset for periodic B.C.
        real_t R = std::sqrt(norm(dX));                         //  Scalar distance
        if (R - Ci->R - Cj->R < sqrtf(3) * ewald->cutoff) {     //  If cells are close
          if(Cj->NCHILD == 0) ewald->P2P(Ci,Cj,Xperiodic);      //   Ewald real part on leaf pairs
          for (C_iter CC=C0+Cj->ICHILD; CC!=C0+Cj->ICHILD+Cj->NCHILD; CC++) {// Loop over cell's children
            Neighbor neighbor(ewald, Ci, CC, C0);               //    Instantiate recursive functor
            neighbor();                                         //    Recursive call
          }                                                     //   End loop over cell's children
        }                                                       //  End if for far cells
      }                                                         // End overload operator()
    };

  public:
    //! Constructor
    Ewald(int _ksize, real_t _alpha, real_t _sigma, real_t _cutoff, vec3 _cycle) :
      ksize(_ksize), alpha(_alpha), sigma(_sigma), cutoff(_cutoff), cycle(_cycle) {}// Initialize variables

    //! Ewald real part: task-parallel traversal pairing each leaf target cell
    //! with the source tree
    void realPart(Cells & cells, Cells & jcells) {
      logger::startTimer("Ewald real part");                    // Start timer
      C_iter Cj = jcells.begin();                               // Set begin iterator of source cells
      mk_task_group;                                            // Initialize tasks
      for (C_iter Ci=cells.begin(); Ci!=cells.end(); Ci++) {    // Loop over target cells
        if (Ci->NCHILD == 0) {                                  //  If target cell is leaf
          Neighbor neighbor(this, Ci, Cj, Cj);                  //   Instantiate recursive functor
          create_taskc(neighbor);                               //   Create task for recursive call
        }                                                       //  End if for leaf target cell
      }                                                         // End loop over target cells
      wait_tasks;                                               // Synchronize tasks
      logger::stopTimer("Ewald real part");                     // Stop timer
    }

    //! Subtract self term of the real-space sum from each body's potential
    void selfTerm(Bodies & bodies) {
      for (B_iter B=bodies.begin(); B!=bodies.end(); B++) {     // Loop over all bodies
        B->TRG[0] -= M_2_SQRTPI * B->SRC * alpha;               //  Self term of Ewald real part
      }                                                         // End loop over all bodies in cell
    }

    //! Ewald wave part: DFT of sources, Gaussian filter in k-space, inverse DFT
    void wavePart(Bodies & bodies, Bodies & jbodies) {
      logger::startTimer("Ewald wave part");                    // Start timer
      Waves waves = initWaves();                                // Initialize wave vector
      dft(waves,jbodies);                                       // Apply DFT to bodies to get waves
      vec3 scale;
      for (int d=0; d<3; d++) scale[d] = 2 * M_PI / cycle[d];   // Scale conversion
      real_t coef = 2 / sigma / cycle[0] / cycle[1] / cycle[2]; // First constant
      real_t coef2 = 1 / (4 * alpha * alpha);                   // Second constant
      for (W_iter W=waves.begin(); W!=waves.end(); W++) {       // Loop over waves
        vec3 K = W->K * scale;                                  //  Wave number scaled
        real_t K2 = norm(K);                                    //  Wave number squared
        real_t factor = coef * std::exp(-K2 * coef2) / K2;      //  Wave factor
        W->REAL *= factor;                                      //  Apply wave factor to real part
        W->IMAG *= factor;                                      //  Apply wave factor to imaginary part
      }                                                         // End loop over waves
      idft(waves,bodies);                                       // Inverse DFT
      logger::stopTimer("Ewald wave part");                     // Stop timer
    }

    //! Print the Ewald parameters when verbose logging is enabled
    void print(int stringLength) {
      if (logger::verbose) {                                    // If verbose flag is true
        std::cout << std::setw(stringLength) << std::fixed << std::left// Set format
                  << "ksize" << " : " << ksize << std::endl     //  Print ksize
                  << std::setw(stringLength)                    //  Set format
                  << "alpha" << " : " << alpha << std::endl     //  Print alpha
                  << std::setw(stringLength)                    //  Set format
                  << "sigma" << " : " << sigma << std::endl     //  Print sigma
                  << std::setw(stringLength)                    //  Set format
                  << "cutoff" << " : " << cutoff << std::endl   //  Print cutoff
                  << std::setw(stringLength)                    //  Set format
                  << "cycle" << " : " << cycle << std::endl;    //  Print cycle
      }                                                         // End if for verbose flag
    }
  };
}
#endif
gmm.c
/** @file gmm.c ** @brief Gaussian Mixture Models - Implementation ** @author David Novotny ** @author Andrea Vedaldi **/ /* Copyright (C) 2013 David Novotny and Andrea Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page gmm Gaussian Mixture Models (GMM) @author David Novotny @author Andrea Vedaldi @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @ref gmm.h is an implementation of *Gaussian Mixture Models* (GMMs). The main functionality provided by this module is learning GMMs from data by maximum likelihood. Model optimization uses the Expectation Maximization (EM) algorithm @cite{dempster77maximum}. The implementation supports @c float or @c double data types, is parallelized, and is tuned to work reliably and effectively on datasets of visual features. Stability is obtained in part by regularizing and restricting the parameters of the GMM. @ref gmm-starting demonstreates how to use the C API to compute the FV representation of an image. For further details refer to: - @subpage gmm-fundamentals <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section gmm-starting Getting started <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> In order to use @ref gmm.h to learn a GMM from training data, create a new ::VlGMM object instance, set the parameters as desired, and run the training code. 
The following example learns @c numClusters Gaussian components from @c numData vectors of dimension @c dimension and storage class @c float using at most 100 EM iterations: @code float * means ; float * covariances ; float * priors ; float * posteriors ; double loglikelihood ; // create a new instance of a GMM object for float data gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ; // set the maximum number of EM iterations to 100 vl_gmm_set_max_num_iterations (gmm, 100) ; // set the initialization to random selection vl_gmm_set_initialization (gmm,VlGMMRand); // cluster the data, i.e. learn the GMM vl_gmm_cluster (gmm, data, numData); // get the means, covariances, and priors of the GMM means = vl_gmm_get_means(gmm); covariances = vl_gmm_get_covariances(gmm); priors = vl_gmm_get_priors(gmm); // get loglikelihood of the estimated GMM loglikelihood = vl_gmm_get_loglikelihood(gmm) ; // get the soft assignments of the data points to each cluster posteriors = vl_gmm_get_posteriors(gmm) ; @endcode @note ::VlGMM assumes that the covariance matrices of the GMM are diagonal. This reduces significantly the number of parameters to learn and is usually an acceptable compromise in vision applications. If the data is significantly correlated, it can be beneficial to de-correlate it by PCA rotation or projection in pre-processing. ::vl_gmm_get_loglikelihood is used to get the final loglikelihood of the estimated mixture, ::vl_gmm_get_means and ::vl_gmm_get_covariances to obtain the means and the diagonals of the covariance matrices of the estimated Gaussian modes, and ::vl_gmm_get_posteriors to get the posterior probabilities that a given point is associated to each of the modes (soft assignments). The learning algorithm, which uses EM, finds a local optimum of the objective function. Therefore the initialization is crucial in obtaining a good model, measured in term of the final loglikelihood. 
::VlGMM supports a few methods (use ::vl_gmm_set_initialization to choose one) as follows: Method | ::VlGMMInitialization enumeration | Description ----------------------|-----------------------------------------|----------------------------------------------- Random initialization | ::VlGMMRand | Random initialization of the mixture parameters KMeans | ::VlGMMKMeans | Initialization of the mixture parameters using ::VlKMeans Custom | ::VlGMMCustom | User specified initialization Note that in the case of ::VlGMMKMeans initialization, an object of type ::VlKMeans object must be created and passed to the ::VlGMM instance (see @ref kmeans to see how to correctly set up this object). When a user wants to use the ::VlGMMCustom method, the initial means, covariances and priors have to be specified using the ::vl_gmm_set_means, ::vl_gmm_set_covariances and ::vl_gmm_set_priors methods. **/ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page gmm-fundamentals GMM fundamentals @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> A *Gaussian Mixture Model* (GMM) is a mixture of $K$ multivariate Gaussian distributions. In order to sample from a GMM, one samples first the component index $k \in \{1,\dots,K\}$ with *prior probability* $\pi_k$, and then samples the vector $\bx \in \mathbb{R}^d$ from the $k$-th Gaussian distribution $p(\bx|\mu_k,\Sigma_k)$. Here $\mu_k$ and $\Sigma_k$ are respectively the *mean* and *covariance* of the distribution. The GMM is completely specified by the parameters $\Theta=\{\pi_k,\mu_k,\Sigma_k; k = 1,\dots,K\}$ The density $p(\bx|\Theta)$ induced on the training data is obtained by marginalizing the component selector $k$, obtaining \[ p(\bx|\Theta) = \sum_{k=1}^{K} \pi_k p( \bx_i |\mu_k,\Sigma_k), \qquad p( \bx |\mu_k,\Sigma_k) = \frac{1}{\sqrt{(2\pi)^d\det\Sigma_k}} \exp\left[ -\frac{1}{2} (\bx-\mu_k)^\top\Sigma_k^{-1}(\bx-\mu_k) \right]. 
\] Learning a GMM to fit a dataset $X=(\bx_1, \dots, \bx_n)$ is usually done by maximizing the log-likelihood of the data: @f[ \ell(\Theta;X) = E_{\bx\sim\hat p} [ \log p(\bx|\Theta) ] = \frac{1}{n}\sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi_k p(\bx_i|\mu_k, \Sigma_k) @f] where $\hat p$ is the empirical distribution of the data. An algorithm to solve this problem is introduced next. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section gmm-em Learning a GMM by expectation maximization <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The direct maximization of the log-likelihood function of a GMM is difficult due to the fact that the assignments of points to Gaussian mode is not observable and, as such, must be treated as a latent variable. Usually, GMMs are learned by using the *Expectation Maximization* (EM) algorithm @cite{dempster77maximum}. Consider in general the problem of estimating to the maximum likelihood a distribution $p(x|\Theta) = \int p(x,h|\Theta)\,dh$, where $x$ is a measurement, $h$ is a *latent variable*, and $\Theta$ are the model parameters. By introducing an auxiliary distribution $q(h|x)$ on the latent variable, one can use Jensen inequality to obtain the following lower bound on the log-likelihood: @f{align*} \ell(\Theta;X) = E_{x\sim\hat p} \log p(x|\Theta) &= E_{x\sim\hat p} \log \int p(x,h|\Theta) \,dh \\ &= E_{x\sim\hat p} \log \int \frac{p(x,h|\Theta)}{q(h|x)} q(h|x)\,dh \\ &\geq E_{x\sim\hat p} \int q(h) \log \frac{p(x,h|\Theta)}{q(h|x)}\,dh \\ &= E_{(x,q) \sim q(h|x) \hat p(x)} \log p(x,h|\Theta) - E_{(x,q) \sim q(h|x) \hat p(x)} \log q(h|x) @f} The first term of the last expression is the log-likelihood of the model where both the $x$ and $h$ are observed and joinlty distributed as $q(x|h)\hat p(x)$; the second term is the a average entropy of the latent variable, which does not depend on $\Theta$. 
This lower bound is maximized and becomes tight by setting $q(h|x) = p(h|x,\Theta)$ to be the posterior distribution on the latent variable $h$ (given the current estimate of the parameters $\Theta$). In fact: \[ E_{x \sim \hat p} \log p(x|\Theta) = E_{(x,h) \sim p(h|x,\Theta) \hat p(x)}\left[ \log \frac{p(x,h|\Theta)}{p(h|x,\Theta)} \right] = E_{(x,h) \sim p(h|x,\Theta) \hat p(x)} [ \log p(x|\Theta) ] = \ell(\Theta;X). \] EM alternates between updating the latent variable auxiliary distribution $q(h|x) = p(h|x,\Theta_t)$ (*expectation step*) given the current estimate of the parameters $\Theta_t$, and then updating the model parameters $\Theta_{t+1}$ by maximizing the log-likelihood lower bound derived (*maximization step*). The simplification is that in the maximization step both $x$ and $h$ are now ``observed'' quantities. This procedure converges to a local optimum of the model log-likelihood. @subsection gmm-expectation-step Expectation step In the case of a GMM, the latent variables are the point-to-cluster assignments $k_i, i=1,\dots,n$, one for each of $n$ data points. The auxiliary distribution $q(k_i|\bx_i) = q_{ik}$ is a matrix with $n \times K$ entries. Each row $q_{i,:}$ can be thought of as a vector of soft assignments of the data points $\bx_i$ to each of the Gaussian modes. Setting $q_{ik} = p(k_i | \bx_i, \Theta)$ yields \[ q_{ik} = \frac {\pi_k p(\bx_i|\mu_k,\Sigma_k)} {\sum_{l=1}^K \pi_l p(\bx_i|\mu_l,\Sigma_l)} \] where the Gaussian density $p(\bx_i|\mu_k,\Sigma_k)$ was given above. One important point to keep in mind when these probabilities are computed is the fact that the Gaussian densities may attain very low values and underflow in a vanilla implementation. Furthermore, VLFeat GMM implementation restricts the covariance matrices to be diagonal. 
In this case, the computation of the determinant of $\Sigma_k$ reduces to computing the trace of the matrix and the inversion of $\Sigma_k$ could be obtained by inverting the elements on the diagonal of the covariance matrix. @subsection gmm-maximization-step Maximization step The M step estimates the parameters of the Gaussian mixture components and the prior probabilities $\pi_k$ given the auxiliary distribution on the point-to-cluster assignments computed in the E step. Since all the variables are now ``observed'', the estimate is quite simple. For example, the mean $\mu_k$ of a Gaussian mode is obtained as the mean of the data points assigned to it (accounting for the strength of the soft assignments). The other quantities are obtained in a similar manner, yielding to: @f{align*} \mu_k &= { { \sum_{i=1}^n q_{ik} \bx_{i} } \over { \sum_{i=1}^n q_{ik} } }, \\ \Sigma_k &= { { \sum_{i=1}^n { q_{ik} (\bx_{i} - \mu_{k}) {(\bx_{i} - \mu_{k})}^T } } \over { \sum_{i=1}^n q_{ik} } }, \\ \pi_k &= { \sum_{i=1}^n { q_{ik} } \over { \sum_{i=1}^n \sum_{l=1}^K q_{il} } }. @f} <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section gmm-fundamentals-init Initialization algorithms <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The EM algorithm is a local optimization method. As such, the quality of the solution strongly depends on the quality of the initial values of the parameters (i.e. of the locations and shapes of the Gaussian modes). @ref gmm.h supports the following cluster initialization algorithms: - <b>Random data points.</b> (::vl_gmm_init_with_rand_data) This method sets the means of the modes by sampling at random a corresponding number of data points, sets the covariance matrices of all the modes are to the covariance of the entire dataset, and sets the prior probabilities of the Gaussian modes to be uniform. This initialization method is the fastest, simplest, as well as the one most likely to end in a bad local minimum. 
- <b>KMeans initialization</b> (::vl_gmm_init_with_kmeans) This method uses KMeans to pre-cluster the points. It then sets the means and covariances of the Gaussian distributions the sample means and covariances of each KMeans cluster. It also sets the prior probabilities to be proportional to the mass of each cluster. In order to use this initialization method, a user can specify an instance of ::VlKMeans by using the function ::vl_gmm_set_kmeans_init_object, or let ::VlGMM create one automatically. Alternatively, one can manually specify a starting point (::vl_gmm_set_priors, ::vl_gmm_set_means, ::vl_gmm_set_covariances). **/ #include "gmm.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifndef VL_DISABLE_SSE2 #include "mathop_sse2.h" #endif #ifndef VL_DISABLE_AVX #include "mathop_avx.h" #endif /* ---------------------------------------------------------------- */ #ifndef VL_GMM_INSTANTIATING /* ---------------------------------------------------------------- */ #define VL_GMM_MIN_VARIANCE 1e-6 #define VL_GMM_MIN_POSTERIOR 1e-2 #define VL_GMM_MIN_PRIOR 1e-6 struct _VlGMM { vl_type dataType ; /**< Data type. */ vl_size dimension ; /**< Data dimensionality. */ vl_size numClusters ; /**< Number of clusters */ vl_size numData ; /**< Number of last time clustered data points. */ vl_size maxNumIterations ; /**< Maximum number of refinement iterations. */ vl_size numRepetitions ; /**< Number of clustering repetitions. */ int verbosity ; /**< Verbosity level. */ void * means; /**< Means of Gaussian modes. */ void * covariances; /**< Diagonals of covariance matrices of Gaussian modes. */ void * priors; /**< Weights of Gaussian modes. */ void * posteriors; /**< Probabilities of correspondences of points to clusters. */ double * sigmaLowBound ; /**< Lower bound on the diagonal covariance values. 
*/
  VlGMMInitialization initialization; /**< Initialization option */
  VlKMeans * kmeansInit; /**< KMeans object used to initialize the Gaussian modes */
  double LL ; /**< Log-likelihood of the current solution */
  vl_bool kmeansInitIsOwner; /**< Indicates whether a user provided the kmeans initialization object */
} ;

/* ---------------------------------------------------------------- */
/*                                                       Life-cycle */
/* ---------------------------------------------------------------- */

/* Grow the posterior buffer so it can hold numData x numClusters entries.
   The buffer only ever grows; it is reused unchanged when numData shrinks.
   NOTE(review): the vl_malloc result is not checked here — presumably
   vl_malloc aborts on out-of-memory; confirm against VLFeat's allocator
   contract. */
static void
_vl_gmm_prepare_for_data (VlGMM* self, vl_size numData)
{
  if (self->numData < numData) {
    vl_free(self->posteriors) ;
    self->posteriors = vl_malloc(vl_get_type_size(self->dataType) * numData * self->numClusters) ;
  }
  self->numData = numData ;
}

/** @brief Create a new GMM object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param dimension dimension of the data.
 ** @param numComponents number of Gaussian mixture components.
 ** @return new GMM object instance.
 **
 ** The object starts with random initialization, verbosity 0, at most
 ** 50 EM iterations, one repetition, and a per-dimension covariance
 ** lower bound of 1e-4. The priors/means/covariances buffers are
 ** zero-initialized; posteriors are allocated lazily on first use.
 **/
VlGMM *
vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents)
{
  vl_index i ;
  vl_size size = vl_get_type_size(dataType) ;
  VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ;
  self->dataType = dataType;
  self->numClusters = numComponents ;
  self->numData = 0;
  self->dimension = dimension ;
  self->initialization = VlGMMRand;
  self->verbosity = 0 ;
  self->maxNumIterations = 50;
  self->numRepetitions = 1;
  self->sigmaLowBound =  NULL ;
  self->priors = NULL ;
  self->covariances = NULL ;
  self->means = NULL ;
  self->posteriors = NULL ;
  self->kmeansInit = NULL ;
  self->kmeansInitIsOwner = VL_FALSE;

  self->priors = vl_calloc (numComponents, size) ;
  self->means = vl_calloc (numComponents * dimension, size) ;
  self->covariances = vl_calloc (numComponents * dimension, size) ;
  self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ;

  /* Default covariance floor; see vl_gmm_set_covariance_lower_bound(s).
     NOTE(review): i is a signed vl_index compared against an unsigned
     cast of dimension — fine for realistic dimensions, but inconsistent
     with the (signed) casts used elsewhere in this file. */
  for (i = 0 ; i < (unsigned)self->dimension ; ++i) { self->sigmaLowBound[i] = 1e-4 ; }
  return self ;
}

/** @brief Reset state
 ** @param self object.
 **
 ** The function resets the state of the GMM object.
It deletes
 ** any stored posterior and other internal state variables. The model
 ** parameters (priors, means, covariances) are left untouched; a
 ** user-provided KMeans initializer is also preserved.
 **/

void
vl_gmm_reset (VlGMM * self)
{
  if (self->posteriors) {
    vl_free(self->posteriors) ;
    self->posteriors = NULL ;
    self->numData = 0 ;
  }
  /* Only delete the KMeans object if this GMM created it itself. */
  if (self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
    self->kmeansInit = NULL ;
    self->kmeansInitIsOwner = VL_FALSE ;
  }
}

/** @brief Deletes a GMM object
 ** @param self GMM object instance.
 **
 ** The function deletes the GMM object instance created
 ** by ::vl_gmm_new, releasing all internally-owned buffers.
 ** A KMeans initializer supplied by the user is not deleted.
 **/
void
vl_gmm_delete (VlGMM * self)
{
  if(self->means) vl_free(self->means);
  if(self->covariances) vl_free(self->covariances);
  if(self->priors) vl_free(self->priors);
  if(self->posteriors) vl_free(self->posteriors);
  if(self->sigmaLowBound) vl_free(self->sigmaLowBound);
  if(self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit);
  }
  vl_free(self);
}

/* ---------------------------------------------------------------- */
/*                                              Getters and setters */
/* ---------------------------------------------------------------- */

/** @brief Get data type
 ** @param self object
 ** @return data type (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE).
 **/
vl_type
vl_gmm_get_data_type (VlGMM const * self)
{
  return self->dataType ;
}

/** @brief Get the number of clusters
 ** @param self object
 ** @return number of clusters.
 **/
vl_size
vl_gmm_get_num_clusters (VlGMM const * self)
{
  return self->numClusters ;
}

/** @brief Get the number of data points
 ** @param self object
 ** @return number of data points used in the last clustering.
 **/
vl_size
vl_gmm_get_num_data (VlGMM const * self)
{
  return self->numData ;
}

/** @brief Get the log likelihood of the current mixture
 ** @param self object
 ** @return loglikelihood.
 **/
double
vl_gmm_get_loglikelihood (VlGMM const * self)
{
  return self->LL ;
}

/** @brief Get verbosity level
 ** @param self object
 ** @return verbosity level.
 **/
int
vl_gmm_get_verbosity (VlGMM const * self)
{
  return self->verbosity ;
}

/** @brief Set verbosity level
 ** @param self object
 ** @param verbosity verbosity level.
**/
void
vl_gmm_set_verbosity (VlGMM * self, int verbosity)
{
  self->verbosity = verbosity ;
}

/** @brief Get means
 ** @param self object
 ** @return cluster means (numClusters x dimension array of the data type).
 **/
void const *
vl_gmm_get_means (VlGMM const * self)
{
  return self->means ;
}

/** @brief Get covariances
 ** @param self object
 ** @return diagonals of cluster covariance matrices.
 **/
void const *
vl_gmm_get_covariances (VlGMM const * self)
{
  return self->covariances ;
}

/** @brief Get priors
 ** @param self object
 ** @return priors of cluster gaussians.
 **/
void const *
vl_gmm_get_priors (VlGMM const * self)
{
  return self->priors ;
}

/** @brief Get posteriors
 ** @param self object
 ** @return posterior probabilities of cluster memberships (soft assignments).
 **/
void const *
vl_gmm_get_posteriors (VlGMM const * self)
{
  return self->posteriors ;
}

/** @brief Get maximum number of iterations
 ** @param self object
 ** @return maximum number of EM iterations.
 **/
vl_size
vl_gmm_get_max_num_iterations (VlGMM const * self)
{
  return self->maxNumIterations ;
}

/** @brief Set maximum number of iterations
 ** @param self VlGMM filter.
 ** @param maxNumIterations maximum number of EM iterations.
 **/
void
vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations)
{
  self->maxNumIterations = maxNumIterations ;
}

/** @brief Get maximum number of repetitions.
 ** @param self object
 ** @return current number of repetitions for quantization.
 **/
vl_size
vl_gmm_get_num_repetitions (VlGMM const * self)
{
  return self->numRepetitions ;
}

/** @brief Set maximum number of repetitions
 ** @param self object
 ** @param numRepetitions maximum number of repetitions.
 ** The number of repetitions cannot be smaller than 1.
 **/
void
vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions)
{
  assert (numRepetitions >= 1) ;
  self->numRepetitions = numRepetitions ;
}

/** @brief Get data dimension
 ** @param self object
 ** @return data dimension.
**/
vl_size
vl_gmm_get_dimension (VlGMM const * self)
{
  return self->dimension ;
}

/** @brief Get initialization algorithm
 ** @param self object
 ** @return initialization algorithm.
 **/
VlGMMInitialization
vl_gmm_get_initialization (VlGMM const * self)
{
  return self->initialization ;
}

/** @brief Set initialization algorithm.
 ** @param self object
 ** @param init initialization algorithm (::VlGMMRand, ::VlGMMKMeans, or ::VlGMMCustom).
 **/
void
vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init)
{
  self->initialization = init;
}

/** @brief Get KMeans initialization object.
 ** @param self object
 ** @return kmeans initialization object.
 **/
VlKMeans *
vl_gmm_get_kmeans_init_object (VlGMM const * self)
{
  return self->kmeansInit;
}

/** @brief Set KMeans initialization object.
 ** @param self object
 ** @param kmeans initialization KMeans object.
 **
 ** Ownership of @a kmeans stays with the caller: the GMM will not
 ** delete it. Any previously owned KMeans object is deleted first.
 **/
void
vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans)
{
  if (self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
  }
  self->kmeansInit = kmeans;
  self->kmeansInitIsOwner = VL_FALSE;
}

/** @brief Get the lower bound on the diagonal covariance values.
 ** @param self object
 ** @return lower bound on covariances (one entry per dimension).
 **/
double const *
vl_gmm_get_covariance_lower_bounds (VlGMM const * self)
{
  return self->sigmaLowBound;
}

/** @brief Set the lower bounds on diagonal covariance values.
 ** @param self object.
 ** @param bounds bounds.
 **
 ** There is one lower bound per dimension. Use ::vl_gmm_set_covariance_lower_bound
 ** to set all of them to a given scalar.
 **/
void
vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds)
{
  memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ;
}

/** @brief Set the lower bounds on diagonal covariance values.
 ** @param self object.
 ** @param bound bound.
 **
 ** While there is one lower bound per dimension, this function sets
 ** all of them to the specified scalar. Use ::vl_gmm_set_covariance_lower_bounds
 ** to set them individually.
**/
void
vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound)
{
  int i ;
  for (i = 0 ; i < (signed)self->dimension ; ++i) {
    self->sigmaLowBound[i] = bound ;
  }
}

/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */

#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_gmm
#include "shuffle-def.h"

/* #ifdef VL_GMM_INSTANTIATING */
#endif

/* ---------------------------------------------------------------- */
#ifdef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */

/* ---------------------------------------------------------------- */
/*                                            Posterior assignments */
/* ---------------------------------------------------------------- */

/** @fn vl_get_gmm_data_posterior_f(float*,vl_size,vl_size,float const*,float const*,vl_size,float const*,float const*)
 ** @brief Get Gaussian modes posterior probabilities
 ** @param posteriors posterior probabilities (output).
 ** @param numClusters number of modes in the GMM model.
 ** @param numData number of data elements.
 ** @param priors prior mode probabilities of the GMM model.
 ** @param means means of the GMM model.
 ** @param dimension data dimension.
 ** @param covariances diagonal covariances of the GMM model.
 ** @param data data.
 ** @return data log-likelihood.
 **
 ** This is a helper function that does not require a ::VlGMM object
 ** instance to operate.
**/

double
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)
(TYPE * posteriors,
 vl_size numClusters,
 vl_size numData,
 TYPE const * priors,
 TYPE const * means,
 vl_size dimension,
 TYPE const * covariances,
 TYPE const * data)
{
  vl_index i_d, i_cl;
  vl_size dim;
  double LL = 0;

  /* Constant term of the log-density: (d/2) log(2*pi). */
  TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI);
  TYPE * logCovariances ;
  TYPE * logWeights ;
  TYPE * invCovariances ;

#if (FLT == VL_TYPE_FLOAT)
  VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ;
#else
  VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ;
#endif

  logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ;
  invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ;
  logWeights = vl_malloc(sizeof(TYPE) * numClusters) ;

  /* Precompute, per mode: log prior (clamped to -inf for negligible
     priors so those modes never win), log determinant of the diagonal
     covariance (= sum of per-dimension logs), and the inverse diagonal
     used by the Mahalanobis distance function. */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads())
#endif
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE logSigma = 0 ;
    if (priors[i_cl] < VL_GMM_MIN_PRIOR) {
      logWeights[i_cl] = - (TYPE) VL_INFINITY_D ;
    } else {
      logWeights[i_cl] = log(priors[i_cl]);
    }
    for(dim = 0 ; dim < dimension ; ++ dim) {
      logSigma += log(covariances[i_cl*dimension + dim]);
      invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim];
    }
    logCovariances[i_cl] = logSigma;
  } /* end of parallel region */

  /* For each point: compute the log of each mode's weighted density,
     then normalize with the log-sum-exp trick (subtracting the max
     posterior) to avoid underflow of the very small densities. */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \
num_threads(vl_get_max_threads())
#endif
  for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) {
    TYPE clusterPosteriorsSum = 0;
    TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ;

    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
      TYPE p =
      logWeights[i_cl]
      - halfDimLog2Pi
      - 0.5 * logCovariances[i_cl]
      - 0.5 * distFn (dimension,
                      data + i_d * dimension,
                      means + i_cl * dimension,
                      invCovariances + i_cl * dimension) ;
      posteriors[i_cl + i_d * numClusters] = p ;
      if (p > maxPosterior) { maxPosterior = p ; }
    }

    for (i_cl = 0 ; i_cl <
(signed)numClusters ; ++i_cl) { TYPE p = posteriors[i_cl + i_d * numClusters] ; p = exp(p - maxPosterior) ; posteriors[i_cl + i_d * numClusters] = p ; clusterPosteriorsSum += p ; } LL += log(clusterPosteriorsSum) + (double) maxPosterior ; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ; } } /* end of parallel region */ vl_free(logCovariances); vl_free(logWeights); vl_free(invCovariances); return LL; } /* ---------------------------------------------------------------- */ /* Restarts zero-weighted Gaussians */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_maximization_, SFX) (VlGMM * self, TYPE * posteriors, TYPE * priors, TYPE * covariances, TYPE * means, TYPE const * data, vl_size numData) ; static vl_size VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data) { vl_size dimension = self->dimension; vl_size numClusters = self->numClusters; vl_index i_cl, j_cl, i_d, d; vl_size zeroWNum = 0; TYPE * priors = (TYPE*)self->priors ; TYPE * means = (TYPE*)self->means ; TYPE * covariances = (TYPE*)self->covariances ; TYPE * posteriors = (TYPE*)self->posteriors ; //VlRand * rand = vl_get_rand() ; TYPE * mass = vl_calloc(sizeof(TYPE), self->numClusters) ; if (numClusters <= 1) { return 0 ; } /* compute statistics */ { vl_uindex i, k ; vl_size numNullAssignments = 0 ; for (i = 0 ; i < self->numData ; ++i) { for (k = 0 ; k < self->numClusters ; ++k) { TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ; mass[k] += p ; if (p < VL_GMM_MIN_POSTERIOR) { numNullAssignments ++ ; } } } if (self->verbosity) { VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ; } } #if 0 /* search for cluster with negligible weight and reassign them to fat clusters */ for (i_cl = 0 ; i_cl < numClusters ; ++i_cl) { if (priors[i_cl] < 0.00001/numClusters) { double mass = 
priors[0] ; vl_index best = 0 ; for (j_cl = 1 ; j_cl < numClusters ; ++j_cl) { if (priors[j_cl] > mass) { mass = priors[j_cl] ; best = j_cl ; } } if (j_cl == i_cl) { /* this should never happen */ continue ; } j_cl = best ; zeroWNum ++ ; VL_PRINTF("gmm: restarting mode %d by splitting mode %d (with prior %f)\n", i_cl,j_cl,mass) ; priors[i_cl] = mass/2 ; priors[j_cl] = mass/2 ; for (d = 0 ; d < dimension ; ++d) { TYPE sigma2 = covariances[j_cl*dimension + d] ; TYPE sigma = VL_XCAT(vl_sqrt_,SFX)(sigma2) ; means[i_cl*dimension + d] = means[j_cl*dimension + d] + 0.001 * (vl_rand_real1(rand) - 0.5) * sigma ; covariances[i_cl*dimension + d] = sigma2 ; } } } #endif /* search for cluster with negligible weight and reassign them to fat clusters */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { double size = - VL_INFINITY_D ; vl_index best = -1 ; if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR * VL_MAX(1.0, (double) self->numData / self->numClusters)) { continue ; } if (self->verbosity) { VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ; } /* Search for the Gaussian components that (approximately) maximally contribute to make the negative log-likelihood of the data large. Then split the worst offender. To do so, we approximate the exptected log-likelihood of the GMM: E[-log(f(x))] = H(f) = - log \int f(x) log f(x) where the density f(x) = sum_k pk gk(x) is a GMM. This is intractable but it is easy to approximate if we suppose that supp gk is disjoint with supp gq for all components k ~= q. In this canse H(f) ~= sum_k [ - pk log(pk) + pk H(gk) ] where H(gk) is the entropy of component k taken alone. 
The entropy of the latter is given by: H(gk) = D/2 (1 + log(2pi) + 1/2 sum_{i=0}^D log sigma_i^2 */ for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) { double size_ ; if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; } size_ = + 0.5 * dimension * (1.0 + log(2*VL_PI)) ; for(d = 0 ; d < (signed)dimension ; d++) { double sigma2 = covariances[j_cl * dimension + d] ; size_ += 0.5 * log(sigma2) ; } size_ = priors[j_cl] * (size_ - log(priors[j_cl])) ; if (self->verbosity > 1) { VL_PRINTF("gmm: mode %d: prior %f, mass %f, entropy contribution %f\n", j_cl, priors[j_cl], mass[j_cl], size_) ; } if (size_ > size) { size = size_ ; best = j_cl ; } } j_cl = best ; if (j_cl == i_cl || j_cl < 0) { if (self->verbosity) { VL_PRINTF("gmm: mode %d is empty, " "but no other mode to split could be found\n", i_cl) ; } continue ; } if (self->verbosity) { VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n", i_cl, j_cl, priors[j_cl], mass[j_cl], size) ; } /* Search for the dimension with maximum variance. */ size = - VL_INFINITY_D ; best = - 1 ; for(d = 0; d < (signed)dimension; d++) { double sigma2 = covariances[j_cl * dimension + d] ; if (sigma2 > size) { size = sigma2 ; best = d ; } } /* Reassign points j_cl (mode to split) to i_cl (empty mode). */ { TYPE mu = means[best + j_cl * self->dimension] ; for(i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) { TYPE p = posteriors[j_cl + self->numClusters * i_d] ; TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */ if (data[best + i_d * self->dimension] < mu) { /* assign this point to i_cl */ posteriors[i_cl + self->numClusters * i_d] = p + q ; posteriors[j_cl + self->numClusters * i_d] = 0 ; } else { /* assign this point to j_cl */ posteriors[i_cl + self->numClusters * i_d] = 0 ; posteriors[j_cl + self->numClusters * i_d] = p + q ; } } } /* Re-estimate. 
*/ VL_XCAT(_vl_gmm_maximization_, SFX) (self,posteriors,priors,covariances,means,data,self->numData) ; } return zeroWNum; } /* ---------------------------------------------------------------- */ /* Helpers */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self) { vl_uindex dim ; vl_uindex k ; vl_size numAdjusted = 0 ; TYPE * cov = (TYPE*)self->covariances ; double const * lbs = self->sigmaLowBound ; for (k = 0 ; k < self->numClusters ; ++k) { vl_bool adjusted = VL_FALSE ; for (dim = 0 ; dim < self->dimension ; ++dim) { if (cov[k * self->dimension + dim] < lbs[dim] ) { cov[k * self->dimension + dim] = lbs[dim] ; adjusted = VL_TRUE ; } } if (adjusted) { numAdjusted ++ ; } } if (numAdjusted > 0 && self->verbosity > 0) { VL_PRINT("gmm: detected %d of %d modes with at least one dimension " "with covariance too small (set to lower bound)\n", numAdjusted, self->numClusters) ; } } /* ---------------------------------------------------------------- */ /* EM - Maximization step */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_maximization_, SFX) (VlGMM * self, TYPE * posteriors, TYPE * priors, TYPE * covariances, TYPE * means, TYPE const * data, vl_size numData) { vl_size numClusters = self->numClusters; vl_index i_d, i_cl; vl_size dim ; TYPE * oldMeans ; double time = 0 ; if (self->verbosity > 1) { VL_PRINTF("gmm: em: entering maximization step\n") ; time = vl_get_cpu_time() ; } oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ; memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ; memset(priors, 0, sizeof(TYPE) * numClusters) ; memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ; memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ; #if defined(_OPENMP) #pragma omp parallel default(shared) private(i_d, i_cl, dim) \ num_threads(vl_get_max_threads()) #endif { TYPE * 
clusterPosteriorSum_, * means_, * covariances_ ; #if defined(_OPENMP) #pragma omp critical #endif { clusterPosteriorSum_ = vl_calloc(sizeof(TYPE), numClusters) ; means_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ; covariances_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ; } /* Accumulate weighted sums and sum of square differences. Once normalized, these become the means and covariances of each Gaussian mode. The squared differences will be taken w.r.t. the old means however. In this manner, one avoids doing two passes across the data. Eventually, these are corrected to account for the new means properly. In principle, one could set the old means to zero, but this may cause numerical instabilities (by accumulating large squares). */ #if defined(_OPENMP) #pragma omp for #endif for (i_d = 0 ; i_d < (signed)numData ; ++i_d) { for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { TYPE p = posteriors[i_cl + i_d * self->numClusters] ; vl_bool calculated = VL_FALSE ; /* skip very small associations for speed */ if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; } clusterPosteriorSum_ [i_cl] += p ; #ifndef VL_DISABLE_AVX if (vl_get_simd_enabled() && vl_cpu_has_avx()) { VL_XCAT(_vl_weighted_mean_avx_, SFX) (self->dimension, means_+ i_cl * self->dimension, data + i_d * self->dimension, p) ; VL_XCAT(_vl_weighted_sigma_avx_, SFX) (self->dimension, covariances_ + i_cl * self->dimension, data + i_d * self->dimension, oldMeans + i_cl * self->dimension, p) ; calculated = VL_TRUE; } #endif #ifndef VL_DISABLE_SSE2 if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) { VL_XCAT(_vl_weighted_mean_sse2_, SFX) (self->dimension, means_+ i_cl * self->dimension, data + i_d * self->dimension, p) ; VL_XCAT(_vl_weighted_sigma_sse2_, SFX) (self->dimension, covariances_ + i_cl * self->dimension, data + i_d * self->dimension, oldMeans + i_cl * self->dimension, p) ; calculated = VL_TRUE; } #endif if(!calculated) { for (dim = 0 ; dim < self->dimension ; 
++dim) { TYPE x = data[i_d * self->dimension + dim] ; TYPE mu = oldMeans[i_cl * self->dimension + dim] ; TYPE diff = x - mu ; means_ [i_cl * self->dimension + dim] += p * x ; covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ; } } } } /* accumulate */ #if defined(_OPENMP) #pragma omp critical #endif { for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { priors [i_cl] += clusterPosteriorSum_ [i_cl]; for (dim = 0 ; dim < self->dimension ; ++dim) { means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ; covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ; } } vl_free(means_); vl_free(covariances_); vl_free(clusterPosteriorSum_); } } /* parallel section */ /* at this stage priors[] contains the total mass of each cluster */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE mass = priors[i_cl] ; /* do not update modes that do not recieve mass */ if (mass >= 1e-6 / numClusters) { for (dim = 0 ; dim < self->dimension ; ++dim) { means[i_cl * self->dimension + dim] /= mass ; covariances[i_cl * self->dimension + dim] /= mass ; } } } /* apply old to new means correction */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE mass = priors[i_cl] ; if (mass >= 1e-6 / numClusters) { for (dim = 0 ; dim < self->dimension ; ++dim) { TYPE mu = means[i_cl * self->dimension + dim] ; TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ; TYPE diff = mu - oldMu ; covariances[i_cl * self->dimension + dim] -= diff * diff ; } } } VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ; { TYPE sum = 0; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { sum += priors[i_cl] ; } sum = VL_MAX(sum, 1e-12) ; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { priors[i_cl] /= sum ; } } if (self->verbosity > 1) { VL_PRINTF("gmm: em: maximization step completed in %.2f s\n", vl_get_cpu_time() - time) ; } vl_free(oldMeans); } /* ---------------------------------------------------------------- */ /* EM iterations 
*/ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_gmm_em_, SFX) (VlGMM * self, TYPE const * data, vl_size numData) { vl_size iteration, restarted ; double previousLL = (TYPE)(-VL_INFINITY_D) ; double LL = (TYPE)(-VL_INFINITY_D) ; double time = 0 ; _vl_gmm_prepare_for_data (self, numData) ; VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ; for (iteration = 0 ; 1 ; ++ iteration) { double eps ; /* Expectation: assign data to Gaussian modes and compute log-likelihood. */ if (self->verbosity > 1) { VL_PRINTF("gmm: em: entering expectation step\n") ; time = vl_get_cpu_time() ; } LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX) (self->posteriors, self->numClusters, numData, self->priors, self->means, self->dimension, self->covariances, data) ; if (self->verbosity > 1) { VL_PRINTF("gmm: em: expectation step completed in %.2f s\n", vl_get_cpu_time() - time) ; } /* Check the termination conditions. */ if (self->verbosity) { VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n", iteration, LL, LL - previousLL) ; } if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("gmm: em: terminating because " "the maximum number of iterations " "(%d) has been reached.\n", self->maxNumIterations) ; } break ; } eps = vl_abs_d ((LL - previousLL) / (LL)); if ((iteration > 0) && (eps < 0.00001)) { if (self->verbosity) { VL_PRINTF("gmm: em: terminating because the algorithm " "fully converged (log-likelihood variation = %f).\n", eps) ; } break ; } previousLL = LL ; /* Restart empty modes. */ if (iteration > 1) { restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (self, data); if ((restarted > 0) & (self->verbosity > 0)) { VL_PRINTF("gmm: em: %d Gaussian modes restarted because " "they had become empty.\n", restarted); } } /* Maximization: reestimate the GMM parameters. 
*/ VL_XCAT(_vl_gmm_maximization_, SFX) (self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ; } return LL; } /* ---------------------------------------------------------------- */ /* Kmeans initialization of mixtures */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_init_with_kmeans_, SFX) (VlGMM * self, TYPE const * data, vl_size numData, VlKMeans * kmeansInit) { vl_size i_d ; vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData); _vl_gmm_prepare_for_data (self, numData) ; memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ; memset(self->priors,0,sizeof(TYPE) * self->numClusters) ; memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ; memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ; /* setup speified KMeans initialization object if any */ if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; } /* if a KMeans initalization object is still unavailable, create one */ if(self->kmeansInit == NULL) { vl_size ncomparisons = VL_MAX(numData / 4, 10) ; vl_size niter = 5 ; vl_size ntrees = 1 ; vl_size nrepetitions = 1 ; VlKMeansAlgorithm algorithm = VlKMeansANN ; VlKMeansInitialization initialization = VlKMeansRandomSelection ; VlKMeans * kmeansInitDefault = vl_kmeans_new(self->dataType,VlDistanceL2) ; vl_kmeans_set_initialization(kmeansInitDefault, initialization); vl_kmeans_set_max_num_iterations (kmeansInitDefault, niter) ; vl_kmeans_set_max_num_comparisons (kmeansInitDefault, ncomparisons) ; vl_kmeans_set_num_trees (kmeansInitDefault, ntrees); vl_kmeans_set_algorithm (kmeansInitDefault, algorithm); vl_kmeans_set_num_repetitions(kmeansInitDefault, nrepetitions); vl_kmeans_set_verbosity (kmeansInitDefault, self->verbosity); self->kmeansInit = kmeansInitDefault; self->kmeansInitIsOwner = VL_TRUE ; } /* Use k-means to assign data to clusters */ vl_kmeans_cluster (self->kmeansInit, data, 
self->dimension, numData, self->numClusters); vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ; /* Transform the k-means assignments in posteriors and estimates the mode parameters */ for(i_d = 0; i_d < numData; i_d++) { ((TYPE*)self->posteriors)[assignments[i_d] + i_d * self->numClusters] = (TYPE) 1.0 ; } /* Update cluster parameters */ VL_XCAT(_vl_gmm_maximization_, SFX) (self,self->posteriors,self->priors,self->covariances,self->means,data,numData); vl_free(assignments) ; } /* ---------------------------------------------------------------- */ /* Random initialization of mixtures */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (VlGMM * self, TYPE const * data, TYPE * initSigma, vl_size dimension, vl_size numData) { vl_size dim; vl_uindex i; TYPE * dataMean ; memset(initSigma,0,sizeof(TYPE)*dimension) ; if (numData <= 1) return ; dataMean = vl_malloc(sizeof(TYPE)*dimension); memset(dataMean,0,sizeof(TYPE)*dimension) ; /* find mean of the whole dataset */ for(dim = 0 ; dim < dimension ; dim++) { for(i = 0 ; i < numData ; i++) { dataMean[dim] += data[i*dimension + dim]; } dataMean[dim] /= numData; } /* compute variance of the whole dataset */ for(dim = 0; dim < dimension; dim++) { for(i = 0; i < numData; i++) { TYPE diff = (data[i*self->dimension + dim] - dataMean[dim]) ; initSigma[dim] += diff*diff ; } initSigma[dim] /= numData - 1 ; } vl_free(dataMean) ; } static void VL_XCAT(_vl_gmm_init_with_rand_data_, SFX) (VlGMM * self, TYPE const * data, vl_size numData) { vl_uindex i, k, dim ; VlKMeans * kmeans ; _vl_gmm_prepare_for_data(self, numData) ; /* initilaize priors of gaussians so they are equal and sum to one */ for (i = 0 ; i < self->numClusters ; ++i) { ((TYPE*)self->priors)[i] = (TYPE) (1.0 / self->numClusters) ; } /* initialize diagonals of covariance matrices to data covariance */ VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, 
self->dimension, numData); for (k = 1 ; k < self->numClusters ; ++ k) { for(dim = 0; dim < self->dimension; dim++) { *((TYPE*)self->covariances + k * self->dimension + dim) = *((TYPE*)self->covariances + dim) ; } } /* use kmeans++ initialization to pick points at random */ kmeans = vl_kmeans_new(self->dataType,VlDistanceL2) ; vl_kmeans_init_centers_plus_plus(kmeans, data, self->dimension, numData, self->numClusters) ; memcpy(self->means, vl_kmeans_get_centers(kmeans), sizeof(TYPE) * self->dimension * self->numClusters) ; vl_kmeans_delete(kmeans) ; } /* ---------------------------------------------------------------- */ #else /* VL_GMM_INSTANTIATING */ /* ---------------------------------------------------------------- */ #ifndef __DOXYGEN__ #define FLT VL_TYPE_FLOAT #define TYPE float #define SFX f #define VL_GMM_INSTANTIATING #include "gmm.c" #define FLT VL_TYPE_DOUBLE #define TYPE double #define SFX d #define VL_GMM_INSTANTIATING #include "gmm.c" #endif /* VL_GMM_INSTANTIATING */ #endif /* ---------------------------------------------------------------- */ #ifndef VL_GMM_INSTANTIATING /* ---------------------------------------------------------------- */ /** @brief Create a new GMM object by copy ** @param self object. ** @return new copy. ** ** Most parameters, including the cluster priors, means, and ** covariances are copied. Data posteriors (available after ** initalization or EM) are not; nor is the KMeans object used for ** initialization, if any. 
**/ VlGMM * vl_gmm_new_copy (VlGMM const * self) { vl_size size = vl_get_type_size(self->dataType) ; VlGMM * gmm = vl_gmm_new(self->dataType, self->dimension, self->numClusters); gmm->initialization = self->initialization; gmm->maxNumIterations = self->maxNumIterations; gmm->numRepetitions = self->numRepetitions; gmm->verbosity = self->verbosity; gmm->LL = self->LL; memcpy(gmm->means, self->means, size*self->numClusters*self->dimension); memcpy(gmm->covariances, self->covariances, size*self->numClusters*self->dimension); memcpy(gmm->priors, self->priors, size*self->numClusters); return gmm ; } /** @brief Initialize mixture before EM takes place using random initialization ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. **/ void vl_gmm_init_with_rand_data (VlGMM * self, void const * data, vl_size numData) { vl_gmm_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE : _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ; break ; default: abort() ; } } /** @brief Initializes the GMM using KMeans ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. ** @param kmeansInit KMeans object to use. **/ void vl_gmm_init_with_kmeans (VlGMM * self, void const * data, vl_size numData, VlKMeans * kmeansInit) { vl_gmm_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_gmm_init_with_kmeans_f (self, (float const *)data, numData, kmeansInit) ; break ; case VL_TYPE_DOUBLE : _vl_gmm_init_with_kmeans_d (self, (double const *)data, numData, kmeansInit) ; break ; default: abort() ; } } #if 0 #include<fenv.h> #endif /** @brief Run GMM clustering - includes initialization and EM ** @param self GMM object instance. ** @param data data points which should be clustered. 
** @param numData number of data points. **/ double vl_gmm_cluster (VlGMM * self, void const * data, vl_size numData) { void * bestPriors = NULL ; void * bestMeans = NULL; void * bestCovariances = NULL; void * bestPosteriors = NULL; vl_size size = vl_get_type_size(self->dataType) ; double bestLL = -VL_INFINITY_D; vl_uindex repetition; assert(self->numRepetitions >=1) ; bestPriors = vl_malloc(size * self->numClusters) ; bestMeans = vl_malloc(size * self->dimension * self->numClusters) ; bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ; bestPosteriors = vl_malloc(size * self->numClusters * numData) ; #if 0 feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW); #endif for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) { double LL ; double timeRef ; if (self->verbosity) { VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ; } /* initialize a new mixture model */ timeRef = vl_get_cpu_time() ; switch (self->initialization) { case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ; case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ; case VlGMMCustom : break ; default: abort() ; } if (self->verbosity) { VL_PRINTF("gmm: model initialized in %.2f s\n", vl_get_cpu_time() - timeRef) ; } /* fit the model to data by running EM */ timeRef = vl_get_cpu_time () ; LL = vl_gmm_em (self, data, numData) ; if (self->verbosity) { VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n", vl_get_cpu_time() - timeRef, LL) ; } if (LL > bestLL || repetition == 0) { void * temp ; temp = bestPriors ; bestPriors = self->priors ; self->priors = temp ; temp = bestMeans ; bestMeans = self->means ; self->means = temp ; temp = bestCovariances ; bestCovariances = self->covariances ; self->covariances = temp ; temp = bestPosteriors ; bestPosteriors = self->posteriors ; self->posteriors = temp ; bestLL = LL; } } vl_free (self->priors) ; vl_free 
(self->means) ; vl_free (self->covariances) ; vl_free (self->posteriors) ; self->priors = bestPriors ; self->means = bestMeans ; self->covariances = bestCovariances ; self->posteriors = bestPosteriors ; self->LL = bestLL; if (self->verbosity) { VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ; } return bestLL ; } /** @brief Invoke the EM algorithm. ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. **/ double vl_gmm_em (VlGMM * self, void const * data, vl_size numData) { switch (self->dataType) { case VL_TYPE_FLOAT: return _vl_gmm_em_f (self, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE: return _vl_gmm_em_d (self, (double const *)data, numData) ; break ; default: abort() ; } return 0 ; } /** @brief Explicitly set the initial means for EM. ** @param self GMM object instance. ** @param means initial values of means. **/ void vl_gmm_set_means (VlGMM * self, void const * means) { memcpy(self->means,means, self->dimension * self->numClusters * vl_get_type_size(self->dataType)); } /** @brief Explicitly set the initial sigma diagonals for EM. ** @param self GMM object instance. ** @param covariances initial values of covariance matrix diagonals. **/ void vl_gmm_set_covariances (VlGMM * self, void const * covariances) { memcpy(self->covariances,covariances, self->dimension * self->numClusters * vl_get_type_size(self->dataType)); } /** @brief Explicitly set the initial priors of the gaussians. ** @param self GMM object instance. ** @param priors initial values of the gaussian priors. **/ void vl_gmm_set_priors (VlGMM * self, void const * priors) { memcpy(self->priors,priors, self->numClusters * vl_get_type_size(self->dataType)); } /* VL_GMM_INSTANTIATING */ #endif #undef SFX #undef TYPE #undef FLT #undef VL_GMM_INSTANTIATING
/* deprecate.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w A l l o c a t e W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAllocateWand() allocates an initial drawing wand which is an opaque % handle required by the remaining drawing methods. % % The format of the DrawAllocateWand method is: % % DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image) % % A description of each parameter follows: % % o draw_info: Initial drawing defaults. Set to NULL to use defaults. % % o image: the image to draw on. % */ WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image) { return(AcquireDrawingWand(draw_info,image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) memset(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. 
%
%  The format of the ClonePixelView method is:
%
%      PixelView *ClonePixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  /*
    Bug fix: the clone must reference the same wand as the source view;
    the iterator methods dereference view->wand->images and the original
    left this field NULL (zeroed by the memset above).
  */
  clone_view->wand=pixel_view->wand;
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Bug fix: allocate the per-thread pixel-wand table before populating
    it; the original wrote through clone_view->pixel_wands while it was
    still NULL.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) memset(clone_view->pixel_wands,0,pixel_view->number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y P i x e l V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelView() deallocates memory associated with a pixel view.
%
%  The format of the DestroyPixelView method is:
%
%      PixelView *DestroyPixelView(PixelView *pixel_view,
%        const size_t number_wands,const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
%    o number_wands: the number of pixel wands.
%
%    o number_threads: number of threads.
%
*/

/*
  Release every per-thread row of pixel wands and then the table itself.
  Rows may be NULL (partially constructed views are tolerated).  Always
  returns NULL so the caller can reassign its pointer.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* region.width is the per-row wand count used when the view was built */
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  /* poison the signature so stale pointers fail the assert checks */
  pixel_view->signature=(~WandSignature);
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferPixelViewIterator() iterates over three pixel views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel region is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.
However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict duplex_indexes, *magick_restrict indexes; register const 
PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) 
destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. 
%
%  The format of the GetPixelViewException method is:
%
%      char *GetPixelViewException(const PixelView *pixel_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  /* caller owns the returned string; 2*MaxTextExtent leaves room for the
     " (description)" suffix appended below */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w H e i g h t                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewHeight() returns the pixel view height.
%
%  The format of the GetPixelViewHeight method is:
%
%      size_t GetPixelViewHeight(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your get method for each scanline of the view.  The pixel region is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetPixelViewIterator method is:
%
%      MagickBooleanType GetPixelViewIterator(PixelView *source,
%        GetPixelViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType GetPixelViewIterator(PixelView *source, GetPixelViewMethod get,void *context) { #define GetPixelViewTag "PixelView/Get" Image *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (get == (GetPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (get(source,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_GetPixelViewIterator) #endif proceed=SetImageProgress(source_image,GetPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % 
%   G e t P i x e l V i e w P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* Pixel wands are kept per OpenMP thread; return the calling thread's row. */
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      ssize_t GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      ssize_t GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  size_t
    length;

  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  /* Verify the wand name carries the pixel-view identifier prefix. */
  length=strlen(PixelViewId);
  if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C l i p P a t h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickClipPathImage() clips along the named paths from the 8BIM profile, if
%  present.  Later operations take effect inside the path.  Id may be a number
%  if preceded with #, to work on a numbered path, e.g., "#1" to use the first
%  path.
%
%  The format of the MagickClipPathImage method is:
%
%      MagickBooleanType MagickClipPathImage(MagickWand *wand,
%        const char *pathname,const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o pathname: name of clipping path resource.  If name is preceded by #,
%      use clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  /* Legacy alias: forwards to the renamed MagickClipImagePath(). */
  return(MagickClipImagePath(wand,pathname,inside));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetFillAlpha() returns the alpha used when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Legacy alias: forwards to DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Legacy alias: forwards to DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the PeekDrawingWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Legacy alias: forwards to PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to the
%  previously pushed drawing wand.  Multiple drawing wands may exist.  It is an
%  error to attempt to pop more drawing wands than have been pushed, and it is
%  proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Legacy alias: forwards to PopDrawingWand(); status is discarded. */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Legacy alias: forwards to PushDrawingWand(); status is discarded. */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Legacy alias: forwards to DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
%  The format of the DrawSetStrokeAlpha method is:
%
%      void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o stroke_alpha: stroke alpha.  The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Legacy alias: forwards to DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C o l o r F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickColorFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickColorFloodfillImage method is:
%
%      MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
%        const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.
%      For example, set fuzz to 10 and the color red at intensities of 100
%      and 102 respectively are now interpreted as the same color for the
%      purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the target color from the pixel at (x,y); the coordinates are
    wrapped into the image bounds with modulo.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* A border color selects fill-to-border; otherwise match the seed color. */
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to the
%  file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Legacy alias: forwards to MagickIdentifyImage(). */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k F l a t t e n I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Wrap the flattened image in a new wand; the original wand is unchanged. */
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e A t t r i b u t e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Legacy alias: forwards to MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e I n d e x                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Legacy alias: forwards to MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Legacy alias: forwards to MagickExportImagePixels(). */
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e S i z e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageSize() returns the image length in bytes.
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Returns the blob size of the current image in bytes. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target color from the pixel at (x,y); the coordinates are
    wrapped into the image bounds with modulo.  (A DrawInfo clone in the
    original implementation was never used and has been removed.)
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* Convert alpha (1.0 opaque) to an opacity quantum before flood filling. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  /* Swap the filtered image into the wand's image list in place. */
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M i n i m u m I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M o d e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *mode_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  mode_image=ModeImage(wand->images,radius,wand->exception);
  if (mode_image == (Image *) NULL)
    return(MagickFalse);
  /* Swap the filtered image into the wand's image list in place. */
  ReplaceImageInList(&wand->images,mode_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M o s a i c I m a g e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *mosaic_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  mosaic_image=MosaicImages(wand->images,wand->exception);
  if (mosaic_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mosaic_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
% */ WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand, const PixelWand *target,const PixelWand *fill,const double fuzz) { return(MagickPaintOpaqueImage(wand,target,fill,fuzz)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k P a i n t F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickPaintFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickPaintFloodfillImage method is: % % MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand, % const ChannelType channel,const PixelWand *fill,const double fuzz, % const PixelWand *bordercolor,const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the channel(s). % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. % % o bordercolor: the border color pixel wand. % % o x,y: the starting location of the operation. 
% */ WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand, const ChannelType channel,const PixelWand *fill,const double fuzz, const PixelWand *bordercolor,const ssize_t x,const ssize_t y) { MagickBooleanType status; status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y, MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k P a i n t O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickPaintOpaqueImage() changes any pixel that matches color with the color % defined by fill. % % The format of the MagickPaintOpaqueImage method is: % % MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand, % const PixelWand *target,const PixelWand *fill,const double fuzz) % MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand, % const ChannelType channel,const PixelWand *target, % const PixelWand *fill,const double fuzz) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the channel(s). % % o target: Change this target color to the fill color within the image. % % o fill: the fill pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. 
% */ WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand, const PixelWand *target,const PixelWand *fill,const double fuzz) { return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz)); } WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand, const ChannelType channel,const PixelWand *target,const PixelWand *fill, const double fuzz) { MagickBooleanType status; status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz, MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k P a i n t T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickPaintTransparentImage() changes any pixel that matches color with the % color defined by fill. % % The format of the MagickPaintTransparentImage method is: % % MagickBooleanType MagickPaintTransparentImage(MagickWand *wand, % const PixelWand *target,const double alpha,const double fuzz) % % A description of each parameter follows: % % o wand: the magick wand. % % o target: Change this target color to specified opacity value within % the image. % % o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully % transparent. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. 
% */ WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand, const PixelWand *target,const double alpha,const double fuzz) { return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRadialBlurImage() radial blurs an image. % % The format of the MagickRadialBlurImage method is: % % MagickBooleanType MagickRadialBlurImage(MagickWand *wand, % const double angle) % MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, % const ChannelType channel,const double angle) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o angle: the angle of the blur in degrees. % */ WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand, const double angle) { return(MagickRotationalBlurImage(wand,angle)); } WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, const ChannelType channel,const double angle) { return(MagickRotationalBlurImageChannel(wand,channel,angle)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R e c o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRecolorImage() apply color transformation to an image. The method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). 
% % The format of the MagickRecolorImage method is: % % MagickBooleanType MagickRecolorImage(MagickWand *wand, % const size_t order,const double *color_matrix) % % A description of each parameter follows: % % o wand: the magick wand. % % o order: the number of columns and rows in the color matrix. % % o color_matrix: An array of doubles representing the color matrix. % */ WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand, const size_t order,const double *color_matrix) { Image *transform_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (color_matrix == (const double *) NULL) return(MagickFalse); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); transform_image=RecolorImage(wand->images,order,color_matrix, wand->exception); if (transform_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,transform_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R e d u c e N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickReduceNoiseImage() smooths the contours of an image while still % preserving edge information. The algorithm works by replacing each pixel % with its neighbor closest in value. A neighbor is defined by radius. Use % a radius of 0 and ReduceNoise() selects a suitable radius for you. % % The format of the MagickReduceNoiseImage method is: % % MagickBooleanType MagickReduceNoiseImage(MagickWand *wand, % const double radius) % % A description of each parameter follows: % % o wand: the magick wand. % % o radius: the radius of the pixel neighborhood. 
% */ WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand, const double radius) { Image *noise_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); noise_image=ReduceNoiseImage(wand->images,radius,wand->exception); if (noise_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,noise_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMaximumImages() returns the maximum intensity of an image sequence. % % The format of the MagickMaximumImages method is: % % MagickWand *MagickMaximumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMaximumImages(MagickWand *wand) { Image *maximum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator, wand->exception); if (maximum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,maximum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageAttribute() associates a property with an image. 
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
% */ WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand, const char *format,const char *key,const char *value) { char option[MaxTextExtent]; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value); return(DefineImageOption(wand->image_info,option)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickTransparentImage() changes any pixel that matches color with the % color defined by fill. % % The format of the MagickTransparentImage method is: % % MagickBooleanType MagickTransparentImage(MagickWand *wand, % const PixelWand *target,const double alpha,const double fuzz) % % A description of each parameter follows: % % o wand: the magick wand. % % o target: Change this target color to specified opacity value within % the image. % % o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully % transparent. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. 
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* Deprecated: delegates to MagickPaintTransparentImage() (non-invert form). */
  return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns it
%  as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* Deprecated: delegates to MagickGetImageRegion(). */
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
%  short int, int, ssize_t, float, or double in the order specified by map.
% % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickSetImagePixels method is: % % MagickBooleanType MagickSetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % const void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter of a region % of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, % or DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, const void *pixels) { return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k W r i t e I m a g e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickWriteImageBlob() implements direct to memory image formats. 
It % returns the image as a blob and its length. Use MagickSetFormat() to % set the format of the returned blob (GIF, JPEG, PNG, etc.). % % Use MagickRelinquishMemory() to free the blob when you are done with it. % % The format of the MagickWriteImageBlob method is: % % unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length) % % A description of each parameter follows: % % o wand: the magick wand. % % o length: the length of the blob. % */ WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length) { return(MagickGetImageBlob(wand,length)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewPixelView() returns a pixel view required for all other methods in the % Pixel View API. % % The format of the NewPixelView method is: % % PixelView *NewPixelView(MagickWand *wand) % % A description of each parameter follows: % % o wand: the wand. 
%
*/

/*
  AcquirePixelsThreadSet() allocates one array of `number_wands` pixel wands
  per worker thread, so each OpenMP thread in the view iterators has private
  wands.  Returns NULL on allocation failure; on partial failure the already
  allocated rows are released via DestroyPixelsThreadSet().
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero so DestroyPixelsThreadSet() can safely walk unassigned slots */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

/*
  NewPixelView() creates a pixel view covering the full canvas of the current
  image; the caller owns the returned view.
  NOTE(review): dereferences wand->images without a NULL check — assumes the
  wand contains at least one image; confirm callers guarantee this.
  NOTE(review): asserts MagickCoreSignature while sibling methods in this file
  assert WandSignature — verify which signature the wand actually carries.
*/
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* wand is assigned before the cache view is acquired from its images */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  /* one set of row wands per OpenMP thread; width wands per row */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    BUGFIX: wand must be assigned before AcquireVirtualCacheView(), which
    reads pixel_view->wand->images; the previous ordering dereferenced the
    NULL wand pointer left by the memset() above.  This now matches
    NewPixelView().
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* one set of row wands per OpenMP thread; width wands per row */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from the
%  pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%  Note: the number of wands in the returned row is discarded by this
%  deprecated method; use PixelGetNextIteratorRow() to obtain it.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /* Deprecated: delegates to PixelGetNextIteratorRow(), dropping the count. */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description of
%  any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* Deprecated: delegates to PixelGetIteratorException(). */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* writing pixels requires DirectClass storage */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs y from region.y up to region.height (not
    region.y+region.height), so a nonzero region.y shortens the scan by that
    many rows — confirm this matches the other view iterators before changing.
  */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* a failure on any row cancels the remaining rows */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* user callback fills this thread's pixel wands for the row */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wand values back into the authentic pixel row */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
% % The format of the TransferPixelViewIterator method is: % % MagickBooleanType TransferPixelViewIterator(PixelView *source, % PixelView *destination,TransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source, PixelView *destination,TransferPixelViewMethod transfer,void *context) { #define TransferPixelViewTag "PixelView/Transfer" ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) 
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferPixelViewIterator) #endif 
proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdatePixelViewIterator() iterates over the pixel view in parallel and calls % your update method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdatePixelViewIterator method is: % % MagickBooleanType UpdatePixelViewIterator(PixelView *source, % UpdatePixelViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
/*
  Iterate over the view's scanlines (optionally OpenMP-parallel), hand each
  row to the user's update callback via per-thread pixel wands, and sync the
  modified pixels back to the image.  Returns MagickTrue unless any scanline
  fails to acquire, update, or sync.
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* A missing callback is an immediate failure. */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    /* NOTE(review): bound is region.height, not region.y+region.height;
       matches the sibling iterators in this file — confirm intended. */
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* A failure on any thread aborts the remaining rows. */
    if (status == MagickFalse)
      continue;
    /* Writable access to this scanline of the view. */
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load this thread's pixel wands from the scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* User callback mutates the wands in place. */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* Store the (possibly modified) wands back into the scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    /* Commit the scanline to the image. */
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress reporting across OpenMP threads. */
        #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
snobal.h
/* ** NAME ** snobal.h ** ** DESCRIPTION ** Include file for the snobal library. */ #ifndef _SNOBAL_H_ #define _SNOBAL_H_ #include "types.h" /* * default for snowcover's maximum liquid h2o content as volume * ratio: V_water/(V_snow - V_ice) */ #define DEFAULT_MAX_H2O_VOL 0.01 /* * default for maximum active (surface) layer depth (m) */ #define DEFAULT_MAX_Z_S_0 0.25 /* * default for depth of soil temperature measurement (m) */ #define DEFAULT_Z_G 0.5 /* * Minimum valid snow temperature (C). This is also what temperatures * are set to when there's no snow (instead of 0 K). This yields a * smaller quantization range in the output image: -75 C to 0 C * (instead of -273.16 C to 0 C). */ #define MIN_SNOW_TEMP -75 /* * default for medium run timestep (minutes) */ #define DEFAULT_MEDIUM_TSTEP 15 /* * default for small run timestep (minutes) */ #define DEFAULT_SMALL_TSTEP 1 /* * default for normal run timestep's threshold for a layer's mass * (kg/m^2) */ #define DEFAULT_NORMAL_THRESHOLD 60.0 /* * default for medium run timestep's threshold for a layer's mass * (kg/m^2) */ #define DEFAULT_MEDIUM_THRESHOLD 10.0 /* * default for small run timestep's threshold for a layer's mass * (kg/m^2) */ #define DEFAULT_SMALL_THRESHOLD 1.0 /* * Does a time fall within the current input data timestep? */ #define IN_CURR_DATA_TSTEP(time) \ ((current_time <= (time)) && \ ((time) < current_time + tstep_info[DATA_TSTEP].time_step)) /* ------------------------------------------------------------------------ */ /* * Public routines in the snobal library. */ extern void init_snow(void); extern int do_data_tstep(void); /* ------------------------------------------------------------------------ */ /* * Global variables that are used to communicate with the snobal library * routines. */ /* variables that control model execution */ extern int run_no_snow; /* continue model even if snow disappears? */ extern int stop_no_snow; /* stopped model because no snow left? 
*/ extern void (*out_func)(void); /* -> output function */ /* constant model parameters */ extern double max_z_s_0; /* maximum active layer thickness (m) */ extern double max_h2o_vol; /* max liquid h2o content as volume ratio: V_water/(V_snow - V_ice) (unitless) */ /* time step information */ typedef struct { int level; /* timestep's level */ #define DATA_TSTEP 0 #define NORMAL_TSTEP 1 #define MEDIUM_TSTEP 2 #define SMALL_TSTEP 3 double time_step; /* length of timestep (seconds) */ int intervals; /* # of these timestep that are in the previous-level's timestep (not used for level 0: data tstep) */ double threshold; /* mass threshold for a layer to use this timestep (not used for level 0: data tstep) */ int output; /* flags whether or not to call output function for timestep */ #define WHOLE_TSTEP 0x1 /* output when tstep is not divided */ #define DIVIDED_TSTEP 0x2 /* output when timestep is divided */ } TSTEP_REC; extern TSTEP_REC tstep_info[4]; /* array of info for each timestep: 0 : data timestep 1 : normal run timestep 2 : medium " " 3 : small " " */ extern double time_step; /* length current timestep (sec) */ extern double current_time; /* start time of current time step (sec) */ extern double time_since_out; /* time since last output record (sec) */ /* snowpack information */ extern int layer_count; /* number of layers in snowcover: 0, 1, or 2 */ extern double z_s; /* total snowcover thickness (m) */ extern double z_s_0; /* active layer depth (m) */ extern double z_s_l; /* lower layer depth (m) */ extern double rho; /* average snowcover density (kg/m^3) */ extern double m_s; /* snowcover's specific mass (kg/m^2) */ extern double m_s_0; /* active layer specific mass (kg/m^2) */ extern double m_s_l; /* lower layer specific mass (kg/m^2) */ extern double T_s; /* average snowcover temp (K) */ extern double T_s_0; /* active snow layer temp (K) */ extern double T_s_l; /* lower layer temp (C) */ extern double cc_s; /* snowcover's cold content (J/m^2) */ extern double 
cc_s_0; /* active layer cold content (J/m^2) */ extern double cc_s_l; /* lower layer cold content (J/m^2) */ extern double h2o_sat; /* % of liquid H2O saturation (relative water content, i.e., ratio of water in snowcover to water that snowcover could hold at saturation) */ extern double h2o_vol; /* liquid h2o content as volume ratio: V_water/(V_snow - V_ice) (unitless) */ extern double h2o; /* liquid h2o content as specific mass (kg/m^2) */ extern double h2o_max; /* max liquid h2o content as specific mass (kg/m^2) */ extern double h2o_total; /* total liquid h2o: includes h2o in snowcover, melt, and rainfall (kg/m^2) */ /* climate-data input records */ extern int ro_data; /* runoff data? */ typedef struct { double S_n; /* net solar radiation (W/m^2) */ double I_lw; /* incoming longwave (thermal) rad (W/m^2) */ double T_a; /* air temp (C) */ double e_a; /* vapor pressure (Pa) */ double u; /* wind speed (m/sec) */ double T_g; /* soil temp at depth z_g (C) */ double ro; /* measured runoff (m/sec) */ } INPUT_REC; extern INPUT_REC input_rec1; /* input data for start of data timestep */ extern INPUT_REC input_rec2; /* " " " end " " " */ /* climate-data input values for the current run timestep */ extern double S_n; /* net solar radiation (W/m^2) */ extern double I_lw; /* incoming longwave (thermal) rad (W/m^2) */ extern double T_a; /* air temp (C) */ extern double e_a; /* vapor pressure (Pa) */ extern double u; /* wind speed (m/sec) */ extern double T_g; /* soil temp at depth z_g (C) */ extern double ro; /* measured runoff (m/sec) */ /* other climate input */ extern double elevation; /* pixel elevation (m) */ extern double P_a; /* air pressure (Pa) */ /* measurement heights/depths */ extern int relative_hts; /* TRUE if measurements heights, z_T and z_u, are relative to snow surface; FALSE if they are absolute heights above the ground */ extern double z_g; /* depth of soil temp meas (m) */ extern double z_u; /* height of wind measurement (m) */ extern double z_T; /* height 
of air temp & vapor pressure measurement (m) */ extern double z_0; /* roughness length */ /* precipitation info for the current DATA timestep */ extern int precip_now; /* precipitation occur for current timestep? */ extern double m_pp; /* specific mass of total precip (kg/m^2) */ extern double percent_snow; /* % of total mass that's snow (0 to 1.0) */ extern double rho_snow; /* density of snowfall (kg/m^3) */ extern double T_pp; /* precip temp (C) */ extern double T_rain; /* rain's temp (K) */ extern double T_snow; /* snowfall's temp (K) */ extern double h2o_sat_snow; /* snowfall's % of liquid H2O saturation */ /* precipitation info adjusted for current run timestep */ extern double m_precip; /* specific mass of total precip (kg/m^2) */ extern double m_rain; /* " " of rain in precip (kg/m^2) */ extern double m_snow; /* " " " snow " " (kg/m^2) */ extern double z_snow; /* depth of snow in precip (m) */ /* energy balance info for current timestep */ extern double R_n; /* net allwave radiation (W/m^2) */ extern double H; /* sensible heat xfr (W/m^2) */ extern double L_v_E; /* latent heat xfr (W/m^2) */ extern double G; /* heat xfr by conduction & diffusion from soil to snowcover (W/m^2) */ extern double G_0; /* heat xfr by conduction & diffusion from soil or lower layer to active layer (W/m^2) */ extern double M; /* advected heat from precip (W/m^2) */ extern double delta_Q; /* change in snowcover's energy (W/m^2) */ extern double delta_Q_0; /* change in active layer's energy (W/m^2) */ /* averages of energy balance vars since last output record */ extern double R_n_bar; extern double H_bar; extern double L_v_E_bar; extern double G_bar; extern double G_0_bar; extern double M_bar; extern double delta_Q_bar; extern double delta_Q_0_bar; /* mass balance vars for current timestep */ extern double melt; /* specific melt (kg/m^2 or m) */ extern double E; /* mass flux by evap into air from active layer (kg/m^2/s) */ extern double E_s; /* mass of evap into air & soil from 
snowcover (kg/m^2) */ extern double ro_predict; /* predicted specific runoff (m/sec) */ /* sums of mass balance vars since last output record */ extern double melt_sum; extern double E_s_sum; extern double ro_pred_sum; #pragma omp threadprivate(elevation, run_no_snow, stop_no_snow, max_z_s_0, max_h2o_vol, tstep_info, time_step, current_time, time_since_out, \ layer_count, z_s, z_s_0, z_s_l, rho, m_s, m_s_0, m_s_l, T_s, T_s_0, T_s_l, cc_s, cc_s_0, cc_s_l, h2o_sat, \ h2o_vol, h2o, h2o_max, h2o_total, ro_data, input_rec1, input_rec2, S_n, I_lw, T_a, e_a, u, T_g, ro, \ P_a, relative_hts, z_g, z_u, z_T, z_0, precip_now, m_pp, percent_snow, rho_snow, T_pp, T_rain, T_snow, \ h2o_sat_snow, m_precip, m_rain, m_snow, z_snow, R_n, H, L_v_E, G, G_0, M, delta_Q, delta_Q_0, R_n_bar, \ H_bar, L_v_E_bar, G_bar, G_0_bar, M_bar, delta_Q_bar, delta_Q_0_bar, melt, E, E_s, ro_predict, \ melt_sum, E_s_sum, ro_pred_sum, out_func) #endif /* _SNOBAL_H_ */
par_lr_interp.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" #define MAX_C_CONNECTIONS 100 #define HAVE_COMMON_C 1 /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); 
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; //HYPRE_BigInt *found; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* Interpolation weight variables */ HYPRE_Real *ahat = NULL; HYPRE_Real *ahat_offd = NULL; HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; HYPRE_Real diagonal, distribute; HYPRE_Real alfa = 1.; 
HYPRE_Real beta = 1.; /* Loop variables */ // HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, j1, jj, kk, k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Real wall_1 = 0; HYPRE_Real wall_2 = 0; HYPRE_Real wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 0); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) { P_offd_i[i] = jj_counter_offd; } if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] >= 0) { if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] >= 0) { if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
*/ if (n_fine) { ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) { jj_begin_row_offd = jj_counter_offd; } /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] >= 0) { if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] >= 0) { if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_1 += wall_time; fflush(NULL); } if 
(debug_flag==4) { wall_time = time_getWallclockSeconds(); } cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) { ahat[indx] += A_diag_data[jj]; } else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if (num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) { ahat_offd[indx] -= A_offd_data[kk]*distribute; } else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= jj_begin_row_offd) { ihat_offd[i1] = cnt_c_offd; 
ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { big_k1 = A_ext_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /*diag*/ loc_col = (HYPRE_Int)(big_k1 - col_1); indx = ihat[loc_col]; if (indx > -1) { ahat[indx] -= A_ext_data[kk]*distribute; } else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) { ahat_offd[indx] -= A_ext_data[kk]*distribute; } else if (P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if (sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if (num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; for (jj=cnt_c+1; jj < cnt_f; 
jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } else { sum_neg += ahat[jj]; } ahat[jj] = 0; } if (num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal != 0) { alfa = sum_neg/sum_neg_C/diagonal; } if (sum_pos_C*diagonal != 0) { beta = sum_pos/sum_pos_C/diagonal; } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) { P_diag_data[jj] = -beta*ahat[j1]; } else { P_diag_data[jj] = -alfa*ahat[j1]; } P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) { ihat[ipnt[jj]] = -1; } if (num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) { P_offd_data[jj] = -beta*ahat_offd[j1]; } else { P_offd_data[jj] = -alfa*ahat_offd[j1]; } ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) { ihat_offd[ipnt_offd[jj]] = -1; } } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if (num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if (num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal != 0) { alfa = sum/sum_C/diagonal; } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) { ihat[ipnt[jj]] = -1; } if (num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) { ihat_offd[ipnt_offd[jj]] = -1; } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd,
                                fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Rows flagged -3 were excluded from interpolation; restore them to
    * ordinary fine-point status (-1) before returning. */
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(ahat, HYPRE_MEMORY_HOST);
   hypre_TFree(ihat, HYPRE_MEMORY_HOST);
   hypre_TFree(ipnt, HYPRE_MEMORY_HOST);

   if (full_off_procNodes)
   {
      hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
   }
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterp
 * Comment: Host (CPU) implementation of extended+i interpolation
 *          (timer id HYPRE_TIMER_ID_EXTENDED_I_INTERP), OpenMP-threaded.
 *
 * Builds the interpolation operator P from the fine-level matrix A and the
 * strength matrix S.  An F-point row of P interpolates from the C-points
 * that strongly influence it, plus C-points that strongly influence its
 * strong F-neighbors (one extra "layer", hence "extended").  Off-process
 * rows of A and S needed for that extra layer are fetched up front via
 * hypre_exchange_interp_data (A_ext, Sop, with global column numbers).
 *
 * Parameters:
 *   A               - fine-level ParCSR matrix
 *   CF_marker       - per-row coarse/fine flags; >= 0 means C-point, -3
 *                     means "skip this row" (reset to -1 on exit); other
 *                     negative values are ordinary F-points
 *   S               - strength-of-connection matrix (same row partition as A)
 *   num_cpts_global - [first C-point, last C-point + 1] for this process
 *   num_functions   - number of unknowns per node; with dof_func it
 *                     restricts accumulation to matching variables
 *   dof_func        - variable index of each row (used when num_functions > 1)
 *   debug_flag      - == 4 enables wall-clock timing printouts
 *   trunc_factor    - relative truncation threshold for P (0.0 = none)
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited)
 *   P_ptr           - output: the assembled interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Threading scheme: two passes over a per-thread row range.  Pass 1 counts
 * nonzeros per row (thread-locally); the partial P_diag_i / P_offd_i /
 * fine_to_coarse arrays are then stitched together with an exclusive
 * prefix sum over the per-thread totals, separated by barriers.  Pass 2
 * fills in column indices and interpolation weights.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                    hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                    HYPRE_Int num_functions, HYPRE_Int *dof_func,
                                    HYPRE_Int debug_flag, HYPRE_Real trunc_factor,
                                    HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif

   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int               my_id, num_procs;

   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /*HYPRE_Int     num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this process's global column (== row) range; used to
    * decide whether a global column index from A_ext/Sop is local (diag)
    * or off-process (offd, encoded as -(global index)-1). */
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + local_numrows;
   HYPRE_BigInt     total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   HYPRE_Real      *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real      *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int     *col_map_offd_P = NULL;*/
   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   HYPRE_Int       *P_marker = NULL;        /* per-thread scratch: last row that touched each diag column */
   HYPRE_Int       *P_marker_offd = NULL;   /* per-thread scratch, offd columns */
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *tmp_CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_BigInt    *A_ext_j;

   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_BigInt    *fine_to_coarse_offd = NULL;

   HYPRE_Int        loc_col;
   HYPRE_Int        full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_BigInt    *Sop_j;

   HYPRE_Int        sgn = 1;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real       sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker;  /* decremented per F-row so stale marks can't collide */

   /* Loop variables */
   /*HYPRE_Int      index;*/
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt     big_k1;

   /* Threading variables */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   HYPRE_Int * diag_offset;            /* per-thread nnz totals for P_diag (prefix-summed) */
   HYPRE_Int * fine_to_coarse_offset;  /* per-thread coarse-point counts (prefix-summed) */
   HYPRE_Int * offd_offset;            /* per-thread nnz totals for P_offd (prefix-summed) */

   /* Definitions */
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one  = 1.0;
   HYPRE_Real       wall_time;

   hypre_ParCSRCommPkg   *extend_comm_pkg = NULL;

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* Last rank knows the global C-point total; broadcast it to everyone. */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop,
         &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      /* A_ext/Sop hold complete rows for off-process columns; their column
       * arrays carry global indices (BigJ). */
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }

   if (full_off_procNodes)
   {
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd  = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }

   /* This function is smart enough to check P_marker and P_marker_offd only,
    * and set them if they are not NULL.  The other vectors are set
    * regardless. (Here both markers are still NULL; each thread allocates
    * and initializes its own copies inside the parallel region below.) */
   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   /*-----------------------------------------------------------------------
    *  Initialize threading variables
    *-----------------------------------------------------------------------*/
   max_num_threads[0] = hypre_NumThreads();
   diag_offset           = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   offd_offset           = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   for (i=0; i < max_num_threads[0]; i++)
   {
      diag_offset[i] = 0;
      fine_to_coarse_offset[i] = 0;
      offd_offset[i] = 0;
   }

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1)
#endif
   {
      /* Parallelize by computing only over each thread's range of rows.
       *
       * The first large for loop computes ~locally~ for each thread P_diag_i,
       * P_offd_i and fine_to_coarse.  Then, the arrays are stitched together.
       * For example the first phase would compute
       * P_diag_i = [0, 2, 4, 7, 2, 5, 6]
       * for two threads.  P_diag_i[stop] points to the end of that
       * thread's data, but P_diag_i[start] points to the end of the
       * previous thread's row range.  This is then stitched together at the
       * end to yield,
       * P_diag_i = [0, 2, 4, 7, 9, 14, 15].
       *
       * The second large for loop computes interpolation weights and is
       * relatively straight-forward to thread.
       */

      /* initialize thread-wise variables */
      strong_f_marker = -2;
      coarse_counter = 0;
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      if (n_fine)
      {
         /* thread-private marker over all local columns */
         P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_fine; i++)
         {
            P_marker[i] = -1;
         }
      }
      if (full_off_procNodes)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
         for (i = 0; i < full_off_procNodes; i++)
         {  P_marker_offd[i] = -1;}
      }

      /* this thread's row range */
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {  stop = n_fine; }
      else
      {  stop = (n_fine/num_threads)*(my_thread_num+1); }

      /* loop over rows */
      /* This loop counts the number of elements in P */
      /* is done by counting the elements in the index set C-hat */
      for (i = start; i < stop; i++)
      {
         P_diag_i[i] = jj_counter;
         if (num_procs > 1)
            P_offd_i[i] = jj_counter_offd;

         if (CF_marker[i] >= 0)
         {
            /* row in P corresponding to a coarse pt., will only require
               one element (1 on the diagonal). */
            jj_counter++;
            fine_to_coarse[i] = coarse_counter;
            coarse_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i, or C-points that strongly influence F-points
          *  that strongly influence i.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  /* i1 is a C point */
                  /* P_marker < P_diag_i[i] means "not yet counted for row i" */
                  if (P_marker[i1] < P_diag_i[i])
                  {
                     P_marker[i1] = jj_counter;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* i1 is a F point, loop through it's strong neighbors */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              /* remember which offd columns P actually uses */
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            /* Look at off diag strong connections of i */
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     if (P_marker_offd[i1] < P_offd_i[i])
                     {
                        tmp_CF_marker_offd[i1] = 1;
                        P_marker_offd[i1] = jj_counter_offd;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     /* F point; look at neighbors of i1. Sop contains global col
                      * numbers and entries that could be in S_diag or S_offd or
                      * neither. */
                     for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1-col_1);
                           if (P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           /* off-process column, encoded as -(local offd index)-1 */
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
      }

      /*-----------------------------------------------------------------------
       * End loop over fine grid.
       *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* publish this thread's local totals so they can be prefix-summed */
      P_diag_i[stop] = jj_counter;
      P_offd_i[stop] = jj_counter_offd;
      fine_to_coarse_offset[my_thread_num] = coarse_counter;
      diag_offset[my_thread_num] = jj_counter;
      offd_offset[my_thread_num] = jj_counter_offd;

      /* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         /* Calculate the offset for P_diag_i and P_offd_i for each thread
          * (in-place inclusive prefix sum over per-thread totals) */
         for (i = 1; i < num_threads; i++)
         {
            diag_offset[i] = diag_offset[i-1] + diag_offset[i];
            fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
            offd_offset[i] = offd_offset[i-1] + offd_offset[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num > 0)
      {
         /* update row pointer array with offset,
          * making sure to update the row stop index */
         for (i = start+1; i <= stop; i++)
         {
            P_diag_i[i] += diag_offset[my_thread_num-1];
            P_offd_i[i] += offd_offset[my_thread_num-1];
         }
         /* update fine_to_coarse by offsetting with the offset
          * from the preceding thread */
         for (i = start; i < stop; i++)
         {
            if (fine_to_coarse[i] >= 0)
            {
               fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1];
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            hypre_printf("Proc = %d determine structure %f\n",
                         my_id, wall_time);
            fflush(NULL);
         }
         /*-----------------------------------------------------------------------
          *  Allocate arrays.
          *-----------------------------------------------------------------------*/
         if (debug_flag == 4) wall_time = time_getWallclockSeconds();

         P_diag_size = P_diag_i[n_fine];
         P_offd_size = P_offd_i[n_fine];

         if (P_diag_size)
         {
            P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
            P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
         }

         if (P_offd_size)
         {
            P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
            P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
         }
      }

      /* Fine to coarse mapping */
      if (num_procs > 1 && my_thread_num == 0)
      {
         hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                    full_off_procNodes, my_first_cpt,
                                    fine_to_coarse_offd);
      }

      /* reset this thread's private markers for the fill pass */
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }

      for (i = 0; i < full_off_procNodes; i++)
      {
         P_marker_offd[i] = -1;
      }

      /*-----------------------------------------------------------------------
       *  Loop over fine grid points.
       *  (barrier: the arrays allocated by thread 0 above must be visible)
       *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      for (i = start; i < stop; i++)
      {
         jj_begin_row = P_diag_i[i];
         jj_begin_row_offd = P_offd_i[i];
         jj_counter = jj_begin_row;
         jj_counter_offd = jj_begin_row_offd;

         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            /* unique marker value for this row's strong F-neighbors */
            strong_f_marker--;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  if (P_marker[i1] < jj_begin_row)
                  {
                     P_marker[i1] = jj_counter;
                     P_diag_j[jj_counter]    = fine_to_coarse[i1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* strong F-neighbor: tag it, then add its strong C-points */
                  P_marker[i1] = strong_f_marker;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }

            if ( num_procs > 1)
            {
               for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if ( CF_marker_offd[i1] >= 0)
                  {
                     if (P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                     for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number */
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1-col_1);
                           if (P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd]=loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }

            jj_end_row = jj_counter;
            jj_end_row_offd = jj_counter_offd;

            /* first entry of each diag row of A is assumed to be the
             * diagonal element a_(i,i) (standard hypre CSR convention) */
            diagonal = A_diag_data[A_diag_i[i]];

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               /* i1 is a c-point and strongly influences i, accumulate
                * a_(i,i1) into interpolation weight */
               i1 = A_diag_j[jj];
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               else if (P_marker[i1] == strong_f_marker)
               {
                  /* strong F-neighbor: distribute a_(i,i1) among the C-points
                   * of row i (and i itself) in proportion to row i1 of A,
                   * keeping only entries whose sign opposes i1's diagonal */
                  sum = zero;
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i. */
                  for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if ((P_marker[i2] >= jj_begin_row || i2 == i) &&
                         (sgn*A_diag_data[jj1]) < 0)
                        sum += A_diag_data[jj1];
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn*A_offd_data[jj1]) < 0)
                           sum += A_offd_data[jj1];
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj]/sum;
                     /* Loop over row of A for point i1 and do the distribution */
                     for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                           P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1];
                        if (i2 == i && (sgn*A_diag_data[jj1]) < 0)
                           diagonal += distribute*A_diag_data[jj1];
                     }
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd &&
                               (sgn*A_offd_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[i2]] +=
                                 distribute*A_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* nothing to distribute to; lump into the diagonal */
                     diagonal += A_diag_data[jj];
                  }
               }
               /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
                * diagonal */
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            if (num_procs > 1)
            {
               /* same accumulation/distribution for off-process connections,
                * using the fetched external rows A_ext */
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
                              sum += A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                              sum += A_ext_data[jj1];
                        }
                     }
                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                        {
                           big_k1 = A_ext_j[jj1];
                           if (big_k1 >= col_1 && big_k1 < col_n)
                           {
                              /* diag */
                              loc_col = (HYPRE_Int)(big_k1 - col_1);
                              if (P_marker[loc_col] >= jj_begin_row)
                                 P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1];
                              if (loc_col == i)
                                 diagonal += distribute*A_ext_data[jj1];
                           }
                           else
                           {
                              loc_col = (HYPRE_Int)(-big_k1 - 1);
                              if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                                 P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }
            /* divide the row by -diagonal to finish the weights
             * (skipped if diagonal happens to be exactly zero) */
            if (diagonal)
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
                  P_diag_data[jj] /= -diagonal;
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
                  P_offd_data[jj] /= -diagonal;
            }
         }
         strong_f_marker--;
      }

      /*-----------------------------------------------------------------------
       * End large for loop over nfine
       *-----------------------------------------------------------------------*/
      if (n_fine)
      {
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      }
      if (full_off_procNodes)
      {
         hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      }
   }
   /*-----------------------------------------------------------------------
    * End PAR_REGION
    *-----------------------------------------------------------------------*/

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Allocate arrays.
    *-----------------------------------------------------------------------*/

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
      /* truncation reallocates the CSR arrays; refresh our local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Rows flagged -3 were excluded from interpolation; restore them to
    * ordinary fine-point status (-1) before returning. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(diag_offset, HYPRE_MEMORY_HOST);
   hypre_TFree(offd_offset, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPICCInterp
 * Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = 
NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int **ext_p, **ext_p_offd;*/ /*HYPRE_Int ccounter_offd; HYPRE_Int *clist_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, 
num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } /*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for (i = 0; i < MAX_C_CONNECTIONS; i++) clist[i] = 0; if (num_procs > 1) { clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS, HYPRE_MEMORY_HOST); for (i = 0; i < MAX_C_CONNECTIONS; i++) clist_offd[i] = 0; }*/ hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { /*clist[ccounter++] = i1;*/ P_marker[i1] = jj_counter; jj_counter++; } } } /*qsort0(clist,0,ccounter-1);*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { /*clist_offd[ccounter_offd++] = i1;*/ tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if (hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = 
S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; /*break;*/ } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; /*break;*/ } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == -1) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { /*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = (HYPRE_BigInt)(-big_k1 - 1); if (CF_marker_offd[loc_col] == 2) { /*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; /*break;*/ } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; /*break;*/ } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; /*clist[ccounter++] = i1;*/ } } } /*qsort0(clist,0,ccounter-1);*/ if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*clist_offd[ccounter_offd++] = i1;*/ } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if (hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 
= S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { /*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] == 2) { /*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } else { loc_col = (-big_k1 - 1); if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; 
/*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if (i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate 
a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if (loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; 
hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); /*hypre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFFInterp * Comment: Only use FF when there is no common c point. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = 
NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE 
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = 
S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } 
/*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < 
jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if 
(num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; 
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFF1Interp * Comment: Only use FF when there is no common c point. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = 
NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter;*/ HYPRE_Int found_c = 0; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE 
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ found_c = 0; for (kk = 
S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; found_c = 1; break; } } } if (num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; break; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor 
*/ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ found_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; found_c = 1; break; } } } if (num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; 
kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if 
(num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = - (HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_CSRMatrixMemoryLocation(P_diag) = 
memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); /*hynre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = 
hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; 
HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. 
*-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) { P_offd_i[i] = jj_counter_offd; } if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ if (debug_flag== 4) { wall_time = time_getWallclockSeconds(); } P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if 
(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; } /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if ((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0) { sum += A_diag_data[jj1]; } } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) { diagonal += A_diag_data[jj]; } } } if (num_procs > 1) { for (jj = A_offd_i[i]; 
jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row ) { sum += A_ext_data[jj1]; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) { sum += A_ext_data[jj1]; } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) { P_diag_data[P_marker[loc_col]] += distribute * A_ext_data[jj1]; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[loc_col]] += distribute*A_ext_data[jj1]; } } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { diagonal += A_offd_data[jj]; } } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/
   /* Map P_offd's local column indices to (sorted, ascending) global coarse
      column numbers; tmp_CF_marker_offd flags which off-proc nodes P uses. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd,
                                fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore special fine points (-3) to plain F-point marking (-1) so the
      caller's CF_marker is left in its conventional form. */
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 *
 * Dispatch wrapper for extended ("Ext") interpolation: selects the host or
 * device implementation based on where A's data lives.  All parameters are
 * forwarded unchanged; returns the chosen implementation's error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix  *A,
                              HYPRE_Int           *CF_marker,
                              hypre_ParCSRMatrix  *S,
                              HYPRE_BigInt        *num_cpts_global,
                              HYPRE_Int            num_functions,
                              HYPRE_Int           *dof_func,
                              HYPRE_Int            debug_flag,
                              HYPRE_Real           trunc_factor,
                              HYPRE_Int            max_elmts,
                              hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtInterp");
#endif

   /* Execution policy is derived from A's memory location (host vs device). */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_BoomerAMGBuildExtInterpHost(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                               debug_flag,trunc_factor,max_elmts,P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_BoomerAMGBuildExtInterpDevice(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                                 debug_flag,trunc_factor,max_elmts,P_ptr);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*-----------------------------------------------------------------------*/

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterp
 *
 * Dispatch wrapper for extended+i ("Ext+i") interpolation: same host/device
 * selection pattern as hypre_BoomerAMGBuildExtInterp above.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix  *A,
                                HYPRE_Int           *CF_marker,
                                hypre_ParCSRMatrix  *S,
                                HYPRE_BigInt        *num_cpts_global,
                                HYPRE_Int            num_functions,
                                HYPRE_Int           *dof_func,
                                HYPRE_Int            debug_flag,
                                HYPRE_Real           trunc_factor,
                                HYPRE_Int            max_elmts,
                                hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtPIInterp");
#endif

   /* Execution policy is derived from A's memory location (host vs device). */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_BoomerAMGBuildExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                 num_functions, dof_func,
                                                 debug_flag, trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
core32.c
#undef DT32
#define DT32 //<- This should be the ONLY difference between core32 and core64!
/* DT32 selects the single-precision build: `flt` and the associated NIfTI
   datatype / epsilon are chosen here so the rest of the file is precision
   agnostic (core64.c compiles the identical code with DT32 undefined). */
#ifdef DT32
#define flt float
#define DT_CALC DT_FLOAT32
#define epsilon FLT_EPSILON
#else
#define flt double
#define DT_CALC DT_FLOAT64
#define epsilon DBL_EPSILON
#endif
#include <stdio.h>
#include <stdlib.h>
#include <nifti2_io.h>
#include <float.h> //FLT_EPSILON
#ifdef __aarch64__
#include "arm_malloc.h" // provides _mm_malloc/_mm_free shims on ARM
#else
#include <immintrin.h> // _mm_malloc/_mm_free (64-byte aligned allocations)
#endif
#include <limits.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "core.h"
#define bandpass
#ifdef bandpass
#include "bw.h"
#endif
//#define slicetimer
//tensor_decomp support is optional
#ifdef slicetimer
#include "afni.h"
#endif
#define tensor_decomp
//tensor_decomp support is optional
#ifdef tensor_decomp
#include "tensor.h"
#endif
//#define TFCE //formerly we used Christian Gaser's tfce, new bespoke code handles connectivity
//#ifdef TFCE //we now use in-built tfce function
// #include "tfce_pthread.h"
//#endif

/* Stub help function: should never be reached because the wrapper binary
   installs its own help; aborts loudly if it is. */
static int show_helpx( void ) {
	printf("Fatal: show_help shown by wrapper function\n");
	exit(1);
}

/* X coordinate of the intersection of the parabolas rooted at p and q
   (Felzenszwalb/Huttenlocher EDT); INFINITY-rooted parabolas never win. */
static flt vx(flt * f, int p, int q) {
	if ((f[p] == INFINITY) || (f[q] == INFINITY))
		return INFINITY;
	else
		return ((f[q] + q*q) - (f[p] + p*p)) / (2.0*q - 2.0*p);
}

/* 1-D squared Euclidean distance transform of f[0..n-1], in place.
   Lower-envelope-of-parabolas algorithm; scratch buffers are 64-byte aligned
   to match the file's other _mm_malloc allocations. */
static void edt(flt * f, int n) {
	int q, p, k;
	flt s, dx;
	flt * d = (flt *)_mm_malloc((n)*sizeof(flt), 64);
	flt * z = (flt *)_mm_malloc((n)*sizeof(flt), 64);
	int * v = (int *)_mm_malloc((n)*sizeof(int), 64);
	/*# Find the lower envelope of a sequence of parabolas.
	# f...source data (returns the Y of the parabola vertex at X)
	# d...destination data (final distance values are written here)
	# z...temporary used to store X coords of parabola intersections
	# v...temporary used to store X coords of parabola vertices
	# i...resulting X coords of parabola vertices
	# n...number of pixels in "f" to process
	# Always add the first pixel to the enveloping set since it is
	# obviously lower than all parabolas processed so far.*/
	k = 0;
	v[0] = 0;
	z[0] = -INFINITY;
	z[1] = INFINITY;
	for (q = 1; q < n; q++ ) {
		/* If the new parabola is lower than the right-most parabola in
		# the envelope, remove it from the envelope. To make this
		# determination, find the X coordinate of the intersection (s)
		# between the parabolas with vertices at (q,f[q]) and (p,f[p]).*/
		p = v[k];
		s = vx(f, p,q);
		while (s <= z[k]) {
			k = k - 1;
			p = v[k];
			s = vx(f, p,q);
		}
		//# Add the new parabola to the envelope.
		k = k + 1;
		v[k] = q;
		z[k] = s;
		z[k + 1] = INFINITY;
	}
	/*# Go back through the parabolas in the envelope and evaluate them
	# in order to populate the distance values at each X coordinate.*/
	k = 0;
	for (q = 0; q < n; q++ ) {
		while (z[k + 1] < q)
			k = k + 1;
		dx = (q - v[k]);
		d[q] = dx * dx + f[v[k]];
	}
	// copy result back so the transform is in place
	for (q = 0; q < n; q++ )
		f[q] = d[q];
	_mm_free (d);
	_mm_free (z);
	_mm_free (v);
}

/* 1-D squared distance transform specialized for the first (fastest-varying)
   dimension: a simple forward+reverse sweep suffices because input values are
   only 0 or INFINITY there.  `sqr` is presumably defined in core.h — TODO confirm. */
static void edt1(flt * df, int n) { //first dimension is simple
	int q, prevX;
	flt prevY, v;
	prevX = 0;
	prevY = INFINITY;
	//forward
	for (q = 0; q < n; q++ ) {
		if (df[q] == 0) {
			prevX = q;
			prevY = 0;
		} else
			df[q] = sqr(q-prevX)+prevY;
	}
	//reverse
	prevX = n;
	prevY = INFINITY;
	for (q = (n-1); q >= 0; q-- ) {
		v = sqr(q-prevX)+prevY;
		if (df[q] < v) {
			prevX = q;
			prevY = df[q];
		} else
			df[q] = v;
	}
}

/* 3-D (per volume) squared Euclidean distance transform of a binarized image:
   voxels > 0 become INFINITY (foreground), others 0, then separable 1-D EDTs
   are applied along X, Y and Z (Y and Z via transposed copies). */
static int nifti_edt(nifti_image * nim) {
	//https://github.com/neurolabusc/DistanceFields
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt * img = (flt *) nim->data;
	//int nVol = 1;
	//for (int i = 4; i < 8; i++ )
	//	nVol *=
MAX(nim->dim[i],1); int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); int nVol = nim->nvox / nvox3D; if ((nvox3D * nVol) != nim->nvox) return 1; int nx = nim->nx; int ny = nim->ny; int nz = nim->nz; flt threshold = 0.0; for (size_t i = 0; i < nim->nvox; i++ ) { if (img[i] > threshold) img[i] = INFINITY; else img[i] = 0; } size_t nRow = 1; for (int i = 2; i < 8; i++ ) nRow *= MAX(nim->dim[i],1); //EDT in left-right direction for (int r = 0; r < nRow; r++ ) { flt * imgRow = img + (r * nx); edt1(imgRow, nx); } //EDT in anterior-posterior direction nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows for (int v = 0; v < nVol; v++ ) { //transpose each volume separately flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp //transpose data size_t vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { int zo = z * nx * ny; for (int y = 0; y < ny; y++ ) { int xo = 0; for (int x = 0; x < nx; x++ ) { img3D[zo+xo+y] = img[vo]; vo += 1; xo += ny; } } } //perform EDT for all rows for (int r = 0; r < nRow; r++ ) { flt * imgRow = img3D + (r * ny); edt(imgRow, ny); } //transpose data back vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { int zo = z * nx * ny; for (int y = 0; y < ny; y++ ) { int xo = 0; for (int x = 0; x < nx; x++ ) { img[vo] = img3D[zo+xo+y]; vo += 1; xo += ny; } } } _mm_free (img3D); } //for each volume //EDT in head-foot direction nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows #pragma omp parallel for for (int v = 0; v < nVol; v++ ) { //transpose each volume separately flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp //transpose data size_t vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { for (int y = 0; y < ny; y++ ) { int yo = y * nz * nx; int xo = 0; for (int x = 0; x < nx; x++ ) { img3D[z+xo+yo] = img[vo]; vo += 1; xo += nz; } } } //perform EDT for all "rows" for (int 
r = 0; r < nRow; r++ ) { flt * imgRow = img3D + (r * nz); edt(imgRow, nz); } //transpose data back vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { for (int y = 0; y < ny; y++ ) { int yo = y * nz * nx; int xo = 0; for (int x = 0; x < nx; x++ ) { img[vo] = img3D[z+xo+yo]; vo += 1; xo += nz; } //x } //y } //z _mm_free (img3D); } //for each volume return 0; } //Gaussian blur, both serial and parallel variants, https://github.com/neurolabusc/niiSmooth static void blurS(flt * img, int nx, int ny, flt xmm, flt Sigmamm) { //serial blur //make kernels if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0)) return; //flt sigma = (FWHMmm/xmm)/sqrt(8*log(2)); flt sigma = (Sigmamm/xmm); //mm to vox //round(6*sigma), ceil(4*sigma) seems spot on larger than fslmaths //int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 int cutoffvox = ceil(4*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 //printf(".Blur Cutoff (%g) %d\n", 4*sigma, cutoffvox); //validated on SPM12's 1.5mm isotropic mask_ICV.nii (discrete jump in number of non-zero voxels) //fslmaths mask -s 2.26 f6.nii //Blur Cutoff (6.02667) 7 //fslmaths mask -s 2.24 f4.nii //Blur Cutoff (5.97333) 6 cutoffvox = MAX(cutoffvox, 1); flt * k = (flt *)_mm_malloc((cutoffvox+1)*sizeof(flt), 64); //FIR Gaussian flt expd = 2*sigma*sigma; for (int i = 0; i <= cutoffvox; i++ ) k[i] = exp(-1.0f*(i*i)/expd); //calculate start, end for each voxel in int * kStart = (int *)_mm_malloc(nx*sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... 
cutoffvox int * kEnd = (int *)_mm_malloc(nx*sizeof(int), 64); //+cutoff except right columns flt * kWeight = (flt *)_mm_malloc(nx*sizeof(flt), 64); //ensure sum of kernel = 1.0 for (int i = 0; i < nx; i++ ) { kStart[i] = MAX(-cutoffvox, -i);//do not read below 0 kEnd[i] = MIN(cutoffvox, nx-i-1);//do not read beyond final columnn if ((i > 0) && (kStart[i] == (kStart[i-1])) && (kEnd[i] == (kEnd[i-1]))) { //reuse weight kWeight[i] = kWeight[i-1]; continue; } flt wt = 0.0f; for (int j = kStart[i]; j <= kEnd[i]; j++ ) wt += k[abs(j)]; kWeight[i] = 1 / wt; //printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]); } //apply kernel to each row flt * tmp = _mm_malloc(nx*sizeof(flt), 64); //input values prior to blur for (int y = 0; y < ny; y++ ) { //printf("-+ %d:%d\n", y, ny); memcpy(tmp, img, nx*sizeof(flt)); for (int x = 0; x < nx; x++ ) { flt sum = 0; for (int i = kStart[x]; i <= kEnd[x]; i++ ) sum += tmp[x+i] * k[abs(i)]; img[x] = sum * kWeight[x]; } img += nx; } //blurX //free kernel _mm_free (tmp); _mm_free (k); _mm_free (kStart); _mm_free (kEnd); _mm_free (kWeight); } #if defined(_OPENMP) static void blurP(flt * img, int nx, int ny, flt xmm, flt FWHMmm) { //parallel blur //make kernels if ((xmm == 0) || (nx < 2) || (ny < 1) || (FWHMmm <= 0.0)) return; //flt sigma = (FWHMmm/xmm)/sqrt(8*log(2)); flt sigma = (FWHMmm/xmm); //mm to vox int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 cutoffvox = MAX(cutoffvox, 1); flt * k = (flt *)_mm_malloc((cutoffvox+1)*sizeof(flt), 64); //FIR Gaussian flt expd = 2*sigma*sigma; for (int i = 0; i <= cutoffvox; i++ ) k[i] = exp(-1.0f*(i*i)/expd); //calculate start, end for each voxel in int * kStart = (int *)_mm_malloc(nx*sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... 
cutoffvox int * kEnd = (int *)_mm_malloc(nx*sizeof(int), 64); //+cutoff except right columns flt * kWeight = (flt *)_mm_malloc(nx*sizeof(flt), 64); //ensure sum of kernel = 1.0 for (int i = 0; i < nx; i++ ) { kStart[i] = MAX(-cutoffvox, -i);//do not read below 0 kEnd[i] = MIN(cutoffvox, nx-i-1);//do not read beyond final columnn if ((i > 0) && (kStart[i] == (kStart[i-1])) && (kEnd[i] == (kEnd[i-1]))) { //reuse weight kWeight[i] = kWeight[i-1]; continue; } flt wt = 0.0f; for (int j = kStart[i]; j <= kEnd[i]; j++ ) wt += k[abs(j)]; kWeight[i] = 1 / wt; //printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]); } //apply kernel to each row #pragma omp parallel for for (int y = 0; y < ny; y++ ) { flt * tmp = _mm_malloc(nx*sizeof(flt), 64); //input values prior to blur flt * imgx = img; imgx += (nx * y); memcpy(tmp, imgx, nx*sizeof(flt)); for (int x = 0; x < nx; x++ ) { flt sum = 0; for (int i = kStart[x]; i <= kEnd[x]; i++ ) sum += tmp[x+i] * k[abs(i)]; imgx[x] = sum * kWeight[x]; } _mm_free (tmp); } //free kernel _mm_free (k); _mm_free (kStart); _mm_free (kEnd); _mm_free (kWeight); } //blurP #endif static int nifti_smooth_gauss(nifti_image * nim, flt SigmammX, flt SigmammY, flt SigmammZ) { //https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; flt * img = (flt *) nim->data; //int nVol = 1; //for (int i = 4; i < 8; i++ ) // nVol *= MAX(nim->dim[i],1); int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); int nVol = nim->nvox / nvox3D; if ((nvox3D * nVol) != nim->nvox) return 1; int nx = nim->nx; int ny = nim->ny; int nz = nim->nz; if (SigmammX <= 0.0) goto DO_Y_BLUR ; //BLUR X int nRow = 1; for (int i = 2; i < 8; i++ ) nRow *= MAX(nim->dim[i],1); #if defined(_OPENMP) //printf(">>>%d\n", omp_get_num_threads()); if (omp_get_max_threads() > 1) blurP(img, nim->nx, nRow, nim->dx, SigmammX); else blurS(img, nim->nx, 
nRow, nim->dx, SigmammX); #else blurS(img, nim->nx, nRow, nim->dx, SigmammX); #endif //blurX(img, nim->nx, nRow, nim->dx, SigmammX); DO_Y_BLUR: //BLUR Y if (SigmammY <= 0.0) goto DO_Z_BLUR ; nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows #pragma omp parallel for for (int v = 0; v < nVol; v++ ) { //transpose each volume separately flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp size_t vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { int zo = z * nx * ny; for (int y = 0; y < ny; y++ ) { int xo = 0; for (int x = 0; x < nx; x++ ) { img3D[zo+xo+y] = img[vo]; vo += 1; xo += ny; } } } blurS(img3D, nim->ny, nRow, nim->dy, SigmammY); vo = v * nvox3D; //volume offset for (int z = 0; z < nz; z++ ) { int zo = z * nx * ny; for (int y = 0; y < ny; y++ ) { int xo = 0; for (int x = 0; x < nx; x++ ) { img[vo] = img3D[zo+xo+y]; vo += 1; xo += ny; } } } _mm_free (img3D); } //for each volume DO_Z_BLUR: //BLUR Z: if ((SigmammZ <= 0.0) || (nim->nz < 2)) return 0; //all done! 
	nRow = nim->nx * nim->ny;
	//transpose XYZ to ZXY and blur Z columns with XY Rows
	//#pragma omp parallel
	//#pragma omp for
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++ ) { //transpose each volume separately
		flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp
		size_t vo = v * nvox3D; //volume offset
		// transpose XYZ -> ZXY so the Z direction becomes contiguous rows
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img3D[z+xo+yo] = img[vo];
					vo += 1;
					xo += nz;
				}
			}
		}
		// serial blur per volume: parallelism is across volumes here
		blurS(img3D, nz, nRow, nim->dz, SigmammZ);
		vo = v * nvox3D; //volume offset
		// transpose back ZXY -> XYZ
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img[vo] = img3D[z+xo+yo];
					vo += 1;
					xo += nz;
				} //x
			} //y
		} //z
		_mm_free (img3D);
	} //for each volume
	return 0;
}

/* Binarize image in place with Otsu's threshold (values < threshold -> 0,
   else 1).  A 1001-bin histogram of the intensity range is used; NaNs are
   skipped, and zero voxels may optionally be excluded from the histogram and
   left untouched in the output. Returns 0 on success, 1 on unsupported input. */
static int nifti_otsu(nifti_image * nim, int ignoreZeroVoxels) {
	//binarize image using Otsu's method
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt * inimg = (flt *) nim->data;
	flt mn = INFINITY; //better that inimg[0] in case NaN
	flt mx = -INFINITY;
	for (int i = 0; i < nim->nvox; i++ ) {
		mn = MIN(mn, inimg[i]);
		mx = MAX(mx, inimg[i]);
	}
	if (mn >= mx)
		return 0; //no variability
	#define nBins 1001
	flt scl = (nBins-1)/(mx-mn); // map intensity -> bin index
	int hist[nBins];
	for (int i = 0; i < nBins; i++ )
		hist[i] = 0;
	if (ignoreZeroVoxels) {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(inimg[i])) continue;
			if (inimg[i] == 0.0) continue;
			hist[(int)round((inimg[i]-mn)*scl) ]++;
		}
	} else {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(inimg[i])) continue;
			hist[(int)round((inimg[i]-mn)*scl) ]++;
		}
	}
	//https://en.wikipedia.org/wiki/Otsu%27s_method
	size_t total = 0;
	for (int i = 0; i < nBins; i++ )
		total += hist[i];
	int top = nBins - 1; // NOTE(review): unused — candidate for removal
	int level = 0;
	double sumB = 0; // weighted sum of background bins
	double wB = 0;   // background voxel count
	double maximum = 0.0;
	double sum1 = 0.0; // weighted sum over all bins
	for (int i = 0; i < nBins; i++ )
		sum1 += (i * hist[i]);
	for (int ii = 0; ii < nBins; ii++ ) {
		double wF = total - wB; // foreground count (bins >= ii)
		if ((wB > 0) && (wF > 0)) {
			double mF = (sum1 - sumB) / wF; // foreground mean
			// between-class variance (up to a constant factor)
			double val = wB * wF * ((sumB / wB) - mF) * ((sumB / wB) - mF);
			if ( val >= maximum ) {
				level = ii;
				maximum = val;
			}
		}
		wB = wB + hist[ii];
		/* NOTE(review): the textbook Otsu recurrence accumulates ii*hist[ii];
		   (ii-1) shifts the background mean down by one bin (and makes sumB
		   negative for ii==0). Confirm whether this offset is intentional. */
		sumB = sumB + (ii-1) * hist[ii];
	}
	double threshold = (level / scl)+mn; // bin index back to intensity units
	if (ignoreZeroVoxels) {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (inimg[i] == 0.0) continue;
			inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0;
		}
	} else {
		for (int i = 0; i < nim->nvox; i++ )
			inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0;
	}
	//fprintf(stderr,"range %g..%g threshold %g bin %d\n", mn, mx, threshold, level);
	return 0;
}

/* Unsharp masking: out = in + (in - blurred)*amount, clamped to the input's
   original [mn,mx] range. Each 3D volume of a 4D image is processed
   independently (nim->nvox/nim->data are temporarily retargeted to one
   volume to reuse nifti_smooth_gauss, then restored). */
static int nifti_unsharp(nifti_image * nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt amount) {
	//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if (amount == 0.0)
		return 0;
	flt * inimg = (flt *) nim->data;
	void * indat = (void *) nim->data; // remember original buffer to restore later
	flt mn = INFINITY; //better that inimg[0] in case NaN
	flt mx = -INFINITY;
	for (int i = 0; i < nim->nvox; i++ ) {
		mn = MIN(mn, inimg[i]);
		mx = MAX(mx, inimg[i]);
	}
	if (mn >= mx)
		return 0; //no variability
	size_t nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	size_t nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox)
		return 1;
	//process each 3D volume independently: reduce memory pressure
	nim->nvox = nvox3D;
	void * sdat = (void *)calloc(1,nim->nvox * sizeof(flt)) ;
	nim->data = sdat; // nim now aliases the scratch volume for the blur call
	flt * simg = (flt *) sdat;
	for (int v = 0; v < nVol; v++ ) {
		memcpy(simg, inimg, nim->nvox*sizeof(flt));
		nifti_smooth_gauss(nim, SigmammX, SigmammY, SigmammZ); // blurs simg in place
		for (int i = 0; i < nim->nvox; i++ ) {
			//sharpened = original + (original - blurred) * amount
			inimg[i] += (inimg[i] - simg[i]) * amount;
			//keep in original range
			inimg[i] = MAX(inimg[i], mn);
			inimg[i] = MIN(inimg[i], mx);
		}
		inimg += nim->nvox;
	}
	free(sdat); //return
original data nim->data = indat; nim->nvox = nvox3D * nVol; return 0; } //nifti_unsharp() static int nifti_crop(nifti_image * nim, int tmin, int tsize) { if (tsize == 0) { fprintf(stderr,"tsize must not be 0\n"); return 1; } if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0) ) return 1; int nvol = (nim->nvox / nvox3D); //in if (nvol < 2) { fprintf(stderr,"crop only appropriate for 4D volumes"); return 1; } if (tmin >= nvol) { fprintf(stderr,"tmin must be from 0..%d, not %d\n", nvol-1, tmin); return 1; } int tminVol = MAX(0,tmin); int tFinalVol = tminVol+tsize-1; //e.g. if tmin=0 and tsize=1, tFinal=0 if (tsize < 0) { tFinalVol = INT_MAX; } tFinalVol = MIN(tFinalVol, nvol-1); if ((tminVol == 0) && (tFinalVol == (nvol-1)) ) return 0; int nvolOut = tFinalVol-tminVol+1; flt * imgIn = (flt *) nim->data; nim->nvox = nvox3D * nvolOut; void * dat = (void *)calloc(1,nim->nvox * sizeof(flt)) ; flt * imgOut = (flt *) dat; imgIn += tminVol * nvox3D; memcpy(imgOut, imgIn, nim->nvox*sizeof(flt)); free(nim->data); nim->data = dat; if (nvolOut == 1) nim->dim[0] = 3; else nim->dim[0] = 4; nim->ndim = nim->dim[0]; nim->dim[4] = nvolOut; nim->nt = nvolOut; nim->nu = 1; nim->nv = 1; nim->nw = 1; for (int i = 5; i < 8; i++ ) nim->dim[i] = 1; return 0; } static int nifti_rescale ( nifti_image * nim, double scale , double intercept) { //linear transform of data if (nim->nvox < 1) return 1; if (nim->datatype == DT_CALC) { flt scl = scale; flt inter = intercept; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = (f32[i] * scl) + inter; return 0; } fprintf(stderr,"nifti_rescale: Unsupported datatype %d\n", nim->datatype); return 1; } static int nifti_tfceS(nifti_image * nim, double H, double E, int c, int x, int y, int z, double tfce_thresh) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; if ((x < 0) || (x >= nim->dim[1]) || 
(y < 0) || (y >= nim->dim[2]) || (z < 0) || (z >= nim->dim[3])) { fprintf(stderr,"tfceS x/y/z must be in range 0..%"PRId64"/0..%"PRId64"/0..%"PRId64"\n", nim->dim[1]-1, nim->dim[2]-1, nim->dim[3]-1); } if (!neg_determ(nim)) x = nim->dim[1] - x - 1; int seed = x + (y * nim->dim[1]) + (z * nim->dim[1] * nim->dim[2]); flt * inimg = (flt *) nim->data; if (inimg[seed] < H) { fprintf(stderr,"it doesn't reach to specified threshold\n"); return 1; } size_t nvox3D = nim->dim[1]*nim->dim[2]*nim->dim[3]; if (nim->nvox > nvox3D) { fprintf(stderr,"tfceS not suitable for 4D data.\n"); return 1; } //printf("peak %g\n", inimg[seed]); int numk = c; if ((c != 6) && (c != 18) && (c != 26)) { fprintf(stderr,"suitable values for c are 6, 18 or 26\n"); numk = 6; } //set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t * k = (int32_t *)_mm_malloc(3*numk*sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { int dx = abs(x)+abs(y)+abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j+numk] = x; //avoid left-right wrap k[j+numk+numk] = x; //avoid anterior-posterior wrap j++; } //for x flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++ ) mx = MAX((inimg[i]),mx); double dh = mx/100.0; flt * outimg = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //output image int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed uint8_t * vxs = (uint8_t *)_mm_malloc(nvox3D*sizeof(uint8_t), 64); for (int i = 0; i < nvox3D; i++ ) outimg[i] = 0.0; int n_steps = (int)ceil(mx/dh); //for (int step=0; step<n_steps; step++) { for (int step=n_steps-1; step >= 0; step--) { flt thresh = (step+1)*dh; 
memset(vxs, 0, nvox3D*sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++ ) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int qlo = 0; int qhi = 0; q[qhi] = seed; //add starting voxel as seed in queue vxs[seed] = 0; //do not find again! while (qhi >= qlo) { //first in, first out queue //retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume if (vxs[jj] == 0) continue; //already found or did not survive threshold int dx = x+k[j+numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y+k[j+numk+numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior //add new seed: vxs[jj] = 0; //do not find again! qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested flt valToAdd = pow(qhi+1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1 for (int j = 0; j <= qhi; j++) outimg[q[j]] += valToAdd; //printf("step %d thresh %g\n", step, outimg[seed]); if (outimg[seed] >= tfce_thresh) break; } //for each step if ( outimg[seed] < tfce_thresh) fprintf(stderr,"it doesn't reach to specified threshold (%g < %g)\n", outimg[seed], tfce_thresh); for (size_t i = 0; i < nvox3D; i++ ) if (outimg[i] == 0.0) inimg[i] = 0.0; _mm_free (q); _mm_free (vxs); _mm_free (outimg); _mm_free (k); return 0; } static int nifti_tfce(nifti_image * nim, double H, double E, int c) { //https://www.fmrib.ox.ac.uk/datasets/techrep/tr08ss1/tr08ss1.pdf if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; int numk = c; if ((c != 6) && (c != 18) && (c != 26)) { fprintf(stderr,"suitable values for c are 6, 18 or 26\n"); numk = 6; } //set up kernel to search for neighbors. 
Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t * k = (int32_t *)_mm_malloc(3*numk*sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { int dx = abs(x)+abs(y)+abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j+numk] = x; //avoid left-right wrap k[j+numk+numk] = x; //avoid anterior-posterior wrap j++; } //for x //omp notes: here we compute each volume independently. // Christian Gaser computes the step loop in parallel, which accelerates 3D cases // This code is very quick on 3D, so this does not seem crucial, and avoids critical sections #pragma omp parallel for for (int vol = 0; vol < nvol; vol++ ) { //identify clusters flt * inimg = (flt *) nim->data; inimg += vol * nvox3D; flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++ ) mx = MAX((inimg[i]),mx); double dh = mx/100.0; flt * outimg = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //output image int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed uint8_t * vxs = (uint8_t *)_mm_malloc(nvox3D*sizeof(uint8_t), 64); for (int i = 0; i < nvox3D; i++ ) outimg[i] = 0.0; int n_steps = (int)ceil(mx/dh); for (int step=0; step<n_steps; step++) { flt thresh = (step+1)*dh; memset(vxs, 0, nvox3D*sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++ ) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int i = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if (vxs[i] == 0) { i++; continue; } //voxel did not survive or already clustered int qlo = 0; int qhi = 0; q[qhi] = i; //add starting voxel as seed in queue vxs[i] = 0; //do not find again! 
while (qhi >= qlo) { //first in, first out queue //retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume if (vxs[jj] == 0) continue; //already found or did not survive threshold int dx = x+k[j+numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y+k[j+numk+numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior //add new seed: vxs[jj] = 0; //do not find again! qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested flt valToAdd = pow(qhi+1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1 for (int j = 0; j <= qhi; j++) outimg[q[j]] += valToAdd; i++; } //for each voxel } //for each step for (int i = 0; i < nvox3D; i++ ) inimg[i] = outimg[i]; _mm_free (q); _mm_free (vxs); _mm_free (outimg); } _mm_free (k); return 0; } //nifti_tfce() static int nifti_grid( nifti_image * nim, double v, int spacing) { if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2)) return 1; if (nim->datatype != DT_CALC) return 1; size_t nxy = (nim->nx * nim->ny); size_t nzt = nim->nvox / nxy; flt * f32 = (flt *) nim->data; flt fv = v; #pragma omp parallel for for (size_t i = 0; i < nzt; i++ ) { //for each 2D slices size_t so = i * nxy; //slice offset int z = (i % nim->nz); if ((nim->nz > 1) && ((z % spacing) == 0) ) { //whole slice is grid for (size_t j = 0; j < nxy; j++ ) f32[so++] = fv; continue; } for (size_t y = 0; y < nim->ny; y++ ) for (size_t x = 0; x < nim->nx; x++ ) { if ((x % spacing) == 0) f32[so] = fv; so ++; } so = i * nxy; //slice offset for (size_t y = 0; y < nim->ny; y++ ) for(size_t x = 0; x < nim->nx; x++ ) { if ((y % spacing) == 0) f32[so] = fv; so ++; } } //for i: each 2D slice return 0; } static int nifti_rem ( nifti_image * nim, double v, int isFrac) { //remainder (modulo) : fslmaths /*fmod(0.45, 2) = 0.45 : 0 fmod(0.9, 2) = 0.9 : 0 
fmod(1.35, 2) = 1.35 : 1
fmod(1.8, 2) = 1.8 : 1
fmod(-0.45, 2) = -0.45 : 0
fmod(-0.9, 2) = -0.9 : 0
fmod(-1.35, 2) = -1.35 : -1
fmod(-1.8, 2) = -1.8 : -1 */
 if (nim->datatype != DT_CALC) return 1;
 if (nim->nvox < 1) return 1;
 if (v == 0.0) {
  //remainder by zero is undefined: refuse rather than fill the image with NaN
  fprintf(stderr,"Exception: '-rem 0' does not make sense\n");
  return 1;
 }
 flt fv = v;
 flt * f32 = (flt *) nim->data;
 if (isFrac) {
  //fractional remainder: keep C fmod() semantics (result has the sign of the dividend)
  for (size_t i = 0; i < nim->nvox; i++ )
   f32[i] = fmod(f32[i], fv);
 } else {
  //integer remainder: truncate the fmod result toward zero (see table above)
  for (size_t i = 0; i < nim->nvox; i++ ) {
   //printf("fmod(%g, %g) = %g : %g\n", f32[i], fv, fmod(f32[i],fv), trunc(fmod(f32[i],fv)) );
   f32[i] = trunc(fmod(f32[i], fv));
  }
 }
 return 0;
}

//threshold in place: zero voxels below v; with zeroBrightVoxels set, zero voxels above v instead
// returns 0 on success, 1 on unsupported input
static int nifti_thr( nifti_image * nim, double v, int zeroBrightVoxels) {
 if (nim->nvox < 1) return 1;
 if (nim->datatype == DT_CALC) {
  flt fv = v;
  flt * f32 = (flt *) nim->data;
  if (zeroBrightVoxels) {
   //upper threshold: keep only voxels <= v
   for (size_t i = 0; i < nim->nvox; i++ )
    if (f32[i] > fv) f32[i] = 0.0f;
  } else {
   //lower threshold: keep only voxels >= v
   for (size_t i = 0; i < nim->nvox; i++ )
    if (f32[i] < fv) f32[i] = 0.0f;
  }
  return 0;
 }
 fprintf(stderr,"nifti_thr: Unsupported datatype %d\n", nim->datatype);
 return 1;
} // nifti_thr()

//clip in place: voxelwise max(voxel, v); with useMin set, voxelwise min(voxel, v) instead
// returns 0 on success, 1 on unsupported input
static int nifti_max( nifti_image * nim, double v, int useMin) {
 if (nim->nvox < 1) return 1;
 if (nim->datatype == DT_CALC) {
  flt fv = v;
  flt * f32 = (flt *) nim->data;
  if (useMin) {
   for (size_t i = 0; i < nim->nvox; i++ )
    f32[i] = fmin(f32[i], fv);
  } else {
   for (size_t i = 0; i < nim->nvox; i++ )
    f32[i] = fmax(f32[i], fv);
  }
  return 0;
 }
 fprintf(stderr,"nifti_max: Unsupported datatype %d\n", nim->datatype);
 return 1;
} // nifti_max()

//intensity-normalize each 3D volume independently so its mean becomes M
// (per the note below, only voxels > 0 contribute to the mean)
static int nifti_inm( nifti_image * nim, double M) {
 //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
 //With '-inm <value>', every voxel in the input volume is multiplied by <value> / M
 // where M is the mean across all voxels.
//n.b.: regardless of description, mean appears to only include voxels > 0 if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0) ) return 1; int nvol = nim->nvox / nvox3D; flt * f32 = (flt *) nim->data; #pragma omp parallel for for (int v = 0; v < nvol; v++ ) { size_t vi = v * nvox3D; double sum = 0.0; #define gt0 #ifdef gt0 int n = 0; for (size_t i = 0; i < nvox3D; i++ ) { if (f32[vi+i] > 0.0f) { n ++; sum += f32[vi+i]; } } if (sum == 0.0) continue; double ave = sum / n; #else for (int i = 0; i < nvox3D; i++ ) sum += f32[vi+i]; if (sum == 0.0) continue; double ave = sum / nvox3D; #endif //printf("%g %g\n", ave, M); flt scale = M / ave; for (int i = 0; i < nvox3D; i++ ) f32[vi+i] *= scale; } return 0; } // nifti_inm() static int nifti_ing( nifti_image * nim, double M) { //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610 //With '-inm <value>', every voxel in the input volume is multiplied by <value> / M // where M is the mean across all voxels. //n.b.: regardless of description, mean appears to only include voxels > 0 if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; flt * f32 = (flt *) nim->data; double sum = 0.0; int n = 0; for (size_t i = 0; i < nim->nvox; i++ ) { if (f32[i] > 0.0f) { n ++; sum += f32[i]; } } if (sum == 0) return 0; double ave = sum / n; flt scale = M / ave; #pragma omp parallel for for (int i = 0; i < nim->nvox; i++ ) f32[i] *= scale; return 0; } //nifti_ing() static int nifti_robust_range(nifti_image * nim, flt * pct2, flt * pct98, int ignoreZeroVoxels) { //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;31f309c1.1307 // robust range is essentially the 2nd and 98th percentiles // "but ensuring that the majority of the intensity range is captured, even for binary images." 
// fsl uses 1000 bins, also limits for volumes less than 100 voxels taylor.hanayik@ndcn.ox.ac.uk 20190107 //fslstats trick -r // 0.000000 1129.141968 //niimath >fslstats trick -R // 0.000000 2734.000000 *pct2 = 0.0; *pct98 = 1.0; if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; flt * f32 = (flt *) nim->data; flt mn = INFINITY; flt mx = -INFINITY; size_t nZero = 0; size_t nNan = 0; for (size_t i = 0; i < nim->nvox; i++ ) { if (isnan(f32[i])) { nNan ++; continue; } if ( f32[i] == 0.0 ) { nZero++; continue; } mn = fmin(f32[i],mn); mx = fmax(f32[i],mx); } if ((nZero > 0) && (mn > 0.0) && (!ignoreZeroVoxels)) mn = 0.0; if (mn > mx) return 0; //all NaN if (mn == mx) { *pct2 = mn; *pct98 = mx; return 0; } if (!ignoreZeroVoxels) nZero = 0; nZero += nNan; size_t n2pct = round((nim->nvox - nZero)* 0.02); if ((n2pct < 1) || (mn == mx) || ((nim->nvox -nZero) < 100) ) { //T Hanayik mentioned issue with very small volumes *pct2 = mn; *pct98 = mx; return 0; } #define nBins 1001 flt scl = (nBins-1)/(mx-mn); int hist[nBins]; for (int i = 0; i < nBins; i++ ) hist[i] = 0; if (ignoreZeroVoxels) { for (int i = 0; i < nim->nvox; i++ ) { if (isnan(f32[i])) continue; if (f32[i] == 0.0) continue; hist[(int)round((f32[i]-mn)*scl) ]++; } } else { for (int i = 0; i < nim->nvox; i++ ) { if (isnan(f32[i])) continue; hist[(int)round((f32[i]-mn)*scl) ]++; } } size_t n = 0; size_t lo = 0; while (n < n2pct) { n += hist[lo]; //if (lo < 10) // printf("%zu %zu %zu %d\n",lo, n, n2pct, ignoreZeroVoxels); lo++; } lo --; //remove final increment n = 0; int hi = nBins; while (n < n2pct) { hi--; n += hist[hi]; } /*if ((lo+1) < hi) { size_t nGray = 0; for (int i = lo+1; i < hi; i++ ) { nGray += hist[i]; //printf("%d %d\n", i, hist[i]); } float fracGray = (float)nGray/(float)(nim->nvox - nZero); printf("histogram[%d..%d] = %zu %g\n", lo, hi, nGray, fracGray); }*/ if (lo == hi) { //MAJORITY are not black or white int ok = -1; while (ok != 0) { if (lo > 0) { lo--; if (hist[lo] > 0) ok = 0; } 
if ((ok != 0) && (hi < (nBins-1))) {
    hi++;
    if (hist[hi] > 0) ok = 0;
   }
   if ((lo == 0) && (hi == (nBins-1))) ok = 0; //entire histogram scanned: give up
  } //while not ok
 } //if lo == hi
 //convert bin indices back to intensities
 *pct2 = (lo)/scl + mn;
 *pct98 = (hi)/scl + mn;
 printf("full range %g..%g (voxels 0 or NaN =%zu) robust range %g..%g\n", mn, mx, nZero, *pct2, *pct98);
 return 0;
}

//reductions available when collapsing one dimension of an image
enum eDimReduceOp{Tmean,Tstd,Tmax,Tmaxn,Tmin,Tmedian,Tperc,Tar1};

//qsort comparator for flt values, ascending; (a>b)-(a<b) yields -1/0/+1 without overflow
static int compare (const void * a, const void * b) {
 flt fa = *(const flt*) a;
 flt fb = *(const flt*) b;
 return (fa > fb) - (fa < fb);
}

//remove a linear trend (intercept f0 + slope f1*i) from the npt-sample series xx, in place
static void dtrend(flt * xx, int npt, int pt0) {
 //linear detrend, first point is set to zero
 // if pt0=0 then mean is zero, pt0=1 then first point is zero, if pt0=2 final point is zero
 double t1,t3,t10 , x0,x1 ;
 int ii ;
 if( npt < 2 || xx == NULL ) return ;
 //x0 = sum of samples, x1 = sum of sample*index
 x0 = xx[0] ;
 x1 = 0.0 ;
 for( ii=1 ; ii < npt ; ii++ ){
  x0 += xx[ii] ;
  x1 += xx[ii] * ii ;
 }
 t1 = npt*x0;
 t3 = 1.0/npt;
 t10 = npt*npt;
 //closed-form least-squares intercept (f0) and slope (f1) for x = 0..npt-1
 double f0 = (double)(2.0/(npt+1.0)*t3*(2.0*t1-3.0*x1-x0));
 double f1 = (double)(-6.0/(t10-1.0)*t3*(-x0-2.0*x1+t1));
 //printf("%.8g %.8g %g\n", f0, f1, xx[0]);
 if (pt0 == 1) f0 = xx[0]; //anchor: first point becomes zero
 if (pt0 == 2) f0 = xx[npt-1]- (f1*(npt-1)); //anchor: final point becomes zero
 for( ii=0 ; ii < npt ; ii++ )
  xx[ii] -= (f0 + f1*ii) ;
}

//linearly detrend every voxel's timeseries of a 4D image, in place
// returns 0 on success, 1 on unsupported input
static int nifti_detrend_linear(nifti_image * nim) {
 if (nim->datatype != DT_CALC) return 1;
 size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
 if (nvox3D < 1) return 1;
 int nvol = nim->nvox / nvox3D;
 if ((nvox3D * nvol) != nim->nvox) return 1;
 if (nvol < 2) {
  fprintf(stderr,"detrend requires a 4D image with at least three volumes\n");
  return 1;
 }
 flt * img = (flt *) nim->data;
 //each voxel's timeseries is independent: parallelize across the 3D volume
 #pragma omp parallel for
 for (size_t i = 0; i < nvox3D; i++) {
  flt * data = (flt *)_mm_malloc(nvol*sizeof(flt), 64);
  //load one voxel across all timepoints (stride nvox3D between volumes)
  int j = 0;
  for (size_t v = i; v < nim->nvox; v+= nvox3D) {
   data[j] = img[v];
   j++;
  }
  //detrend
  dtrend(data, nvol, 0);
  //save one voxel across all timepoints
  j = 0;
  for (size_t v = i; v < nim->nvox; v+= nvox3D) {
   img[v] = data[j];
   j++;
  }
  _mm_free (data);
 }
 return 0;
}
#ifdef bandpass //https://github.com/QtSignalProcessing/QtSignalProcessing/blob/master/src/iir.cpp //https://github.com/rkuchumov/day_plot_diagrams/blob/8df48af431dc76b1656a627f1965d83e8693ddd7/data.c //https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html // Sample rate and desired cutoff frequencies (in Hz). // double highcut = 1250; // double lowcut = 500; // double samp_rate = 5000; //[b,a] = butter(2, [0.009, 0.08]); //https://afni.nimh.nih.gov/afni/community/board/read.php?1,84373,137180#msg-137180 //Power 2011, Satterthwaite 2013, Carp 2011, Power's reply to Carp 2012 // https://github.com/lindenmp/rs-fMRI/blob/master/func/ButterFilt.m //https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html /* The function butterworth_filter() emulates Jan Simon's FiltFiltM it uses Gustafsson’s method and padding to reduce ringing at start/end https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm?focused=5193423&tab=function Copyright (c) 2011, Jan Simon All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/ static int butterworth_filter(flt * img, int nvox3D, int nvol, double fs, double highcut, double lowcut) { //sample rate, low cut and high cut are all in Hz //this attempts to emulate performance of https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm // specifically, prior to the forward and reverse pass the coefficients are estimated by a forward and reverse pass int order = 2; if (order <= 0) return 1; if ((highcut <= 0.0) && (lowcut <= 0.0)) return 1; if (fs <= 0.0) return 1; if ((lowcut > 0.0) && (highcut > 0.0)) printf("butter bandpass lowcut=%g highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, highcut, fs, order, 2*order); else if (highcut > 0.0) printf("butter lowpass highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", highcut, fs, order, 2*order); else if (lowcut > 0.0) printf("butter highpass lowcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, fs, order, 2*order); else { printf("Butterworth parameters do not make sense\n"); return 1; } double * a; double * b; double * IC; int nX = nvol; int nA = 0; nA = butter_design(order, 2.0*lowcut/fs, 2.0*highcut/fs, &a, &b, &IC); int nEdge = 3 * (nA -1); if ((nA < 1) || (nX <= nEdge)) { printf("filter requires at least %d samples\n", nEdge); _mm_free(a); _mm_free(b); _mm_free(IC); return 1; } #pragma omp parallel for for (int vx = 0; vx < nvox3D; vx++) { double * X = (double *)_mm_malloc(nX*sizeof(double), 64); size_t vo = vx; flt mn = INFINITY; 
flt mx = -INFINITY; for (int j = 0; j < nX; j++) { X[j] = img[vo]; mn = MIN(mn, X[j]); mx = MAX(mx, X[j]); vo += nvox3D; } if (mn < mx) { //some variability double * Xi = (double *)_mm_malloc(nEdge * sizeof(double), 64); for (int i = 0; i < nEdge; i++) Xi[nEdge-i-1] = X[0]-(X[i+1]-X[0]); double * CC = (double *)_mm_malloc((nA-1) * sizeof(double), 64); for (int i = 0; i < (nA-1); i++) CC[i] = IC[i]* Xi[0]; double * Xf = (double *)_mm_malloc(nEdge * sizeof(double), 64); for (int i = 0; i < nEdge; i++) Xf[i] = X[nX-1]-(X[nX-2-i]-X[nX-1]); Filt(Xi, nEdge, a, b, nA-1, CC); //filter head Filt(X, nX, a, b, nA-1, CC); //filter array Filt(Xf, nEdge, a, b, nA-1, CC); //filter tail //reverse for (int i = 0; i < (nA-1); i++) CC[i] = IC[i]* Xf[nEdge-1]; FiltRev(Xf, nEdge, a, b, nA-1, CC); //filter tail FiltRev(X, nX, a, b, nA-1, CC); //filter array _mm_free (Xi); _mm_free (Xf); _mm_free (CC); } else { //else no variability: set all voxels to zero for (int j = 0; j < nX; j++) X[j] = 0; } //save data to 4D array vo = vx; for (int j = 0; j < nX; j++) { img[vo] = X[j]; vo += nvox3D; } _mm_free (X); } //for vx _mm_free(b); _mm_free(a); _mm_free(IC); return 0; } static int nifti_bandpass(nifti_image * nim, double hp_hz, double lp_hz, double TRsec) { if (nim->datatype != DT_CALC) return 1; size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz); if (TRsec <= 0.0) TRsec = nim->pixdim[4]; if (TRsec <= 0) { fprintf(stderr,"Unable to determine sample rate\n"); return 1; } if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { fprintf(stderr,"bandpass requires 4D datasets\n"); return 1; } return butterworth_filter((flt *) nim->data, nvox3D, nvol, 1/TRsec, hp_hz, lp_hz); } #endif static int nifti_bptf(nifti_image * nim, double hp_sigma, double lp_sigma, int demean) { //Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m //5.0.7 highpass temporal filter removes the 
mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1 /* http://www.fast.u-psud.fr/ezyfit/html/ezfit.html fitting functions are: - linear y = m * x - affine or poly1 y = a*x + b - poly{n} y = a0 + a1 * x + ... + an * x^n - power y = c*x^n - sin y = a * sin (b * x) - cos y = a * cos (b * x) - exp y = a * exp (b * x) - log y = a * log (b * x) - cngauss y = exp(-x^2/(2*s^2))/(2*pi*s^2)^(1/2) - cfgauss y = a*exp(-x^2/(2*s^2)) - ngauss y = exp(-(x-x0)^2/(2*s^2))/(2*pi*s^2)^(1/2) - gauss y = a*exp(-(x-x0)^2/(2*s^2)) */ // y = a*exp(-(x-x0)^2/(2*s^2)) // regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight if (nim->datatype != DT_CALC) return 1; if ((hp_sigma <= 0) && (lp_sigma <= 0)) return 0; size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz); if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { fprintf(stderr,"bptf requires 4D datasets\n"); return 1; } int * hpStart, * hpEnd; double * hpSumX, * hpDenom, * hpSumWt, * hp, * hp0; if (hp_sigma > 0) { //initialize high-pass reusables //Spielberg's code uses 8*sigma, does not match current fslmaths: //tested with fslmaths freq4d -bptf 10 -1 nhp //cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412 int cutoffhp = ceil(3*hp_sigma); //to do: check this! 
~3 hp = (double *)_mm_malloc((cutoffhp+1+cutoffhp)*sizeof(double), 64); //-cutoffhp..+cutoffhp hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel hp0[k] = exp(-sqr(k)/(2 * sqr(hp_sigma))); hpStart = (int *)_mm_malloc(nvol*sizeof(int), 64); hpEnd = (int *)_mm_malloc(nvol*sizeof(int), 64); hpSumX = (double *)_mm_malloc(nvol*sizeof(double), 64); // hpDenom = (double *)_mm_malloc(nvol*sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2 hpSumWt = (double *)_mm_malloc(nvol*sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { //linear regression with "gauss" fitting hpStart[v] = MAX(0,v-cutoffhp); hpEnd[v] = MIN(nvol-1,v+cutoffhp); double sumX = 0.0; double sumX2 = 0.0; double sumWt = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k-v; double wt = hp0[x]; //kernel weight sumX += wt * x; sumX2 += wt * x * x; sumWt += wt; } hpSumX[v] = sumX; hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2 if (hpDenom[v] == 0.0) hpDenom[v] = 1.0; //should never happen, x is known index hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later hpSumWt[v] = sumWt; } //for each volume } //high-pass reusables //low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp int * lpStart, * lpEnd; double * lpSumWt, * lp, * lp0; if (lp_sigma > 0) { //initialize low-pass reusables //simple Gaussian blur in time domain //freq4d -bptf -1 5 flp // fslmaths rest -bptf -1 5 flp // 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical // Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive int cutofflp = ceil(8*lp_sigma); //to do: check this! 
at least 6 lp = (double *)_mm_malloc((cutofflp+1+cutofflp)*sizeof(double), 64); //-cutofflp..+cutofflp lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel lp0[k] = exp(-sqr(k)/(2 * sqr(lp_sigma))); lpStart = (int *)_mm_malloc(nvol*sizeof(int), 64); lpEnd = (int *)_mm_malloc(nvol*sizeof(int), 64); lpSumWt = (double *)_mm_malloc(nvol*sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { lpStart[v] = MAX(0,v-cutofflp); lpEnd[v] = MIN(nvol-1,v+cutofflp); double sumWt = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sumWt += lp0[k-v]; //kernel weight if (sumWt == 0.0) sumWt = 1.0; //will never happen lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later } //for each volume } //low-pass reusables //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902 //if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1" //The 'cutoff' is defined as the FWHM of the filter, so if you ask for //100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs. 
// -bptf <hp_sigma> <lp_sigma> flt * img = (flt *) nim->data; #pragma omp parallel for for (size_t i = 0; i < nvox3D; i++) { //read input data flt * imgIn = (flt *)_mm_malloc((nvol)*sizeof(flt), 64); flt * imgOut = (flt *)_mm_malloc((nvol)*sizeof(flt), 64); int j = 0; for (size_t v = i; v < nim->nvox; v+= nvox3D) { imgIn[j] = img[v]; j++; } if (hp_sigma > 0) { double sumOut = 0.0; for (int v = 0; v < nvol; v++) { //each volume double sumY = 0.0; double sumXY = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k-v; double wt = hp0[x]; flt y = imgIn[k]; sumY += wt * y; sumXY += wt * x * y; } double n = hpSumWt[v]; double m = ((n*sumXY) - (hpSumX[v] * sumY) ) * hpDenom[v]; //slope double b = (sumY - (m * hpSumX[v]))/n; //intercept imgOut[v] = imgIn[v] - b; sumOut += imgOut[v]; } //for each volume //"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass double mean = sumOut / (double)nvol; //de-mean AFTER high-pass if (demean) { for (int v = 0; v < nvol; v++) //each volume imgOut[v] -= mean; } } //hp_sigma > 0 if (lp_sigma > 0) { //low pass does not de-mean data //if BOTH low-pass and high-pass, apply low pass AFTER high pass: // fslmaths freq4d -bptf 45 5 fbp // difference 1.86265e-08 //still room for improvement: // fslmaths /Users/chris/src/rest -bptf 45 5 fbp // r=1.0 identical voxels 73% max difference 0.000488281 if (hp_sigma > 0) memcpy(imgIn, imgOut, nvol*sizeof(flt)); for (int v = 0; v < nvol; v++) { //each volume double sum = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sum += imgIn[k] * lp0[k-v]; imgOut[v] = sum * lpSumWt[v]; } // for each volume } //lp_sigma > 0 //write filtered data j = 0; for (size_t v = i; v < nim->nvox; v+= nvox3D) { img[v] = imgOut[j]; j++; } _mm_free (imgIn); _mm_free (imgOut); } if (hp_sigma > 0) { //initialize high-pass reuseables _mm_free (hp); _mm_free (hpStart); _mm_free (hpEnd); _mm_free (hpSumX); _mm_free (hpDenom); _mm_free 
(hpSumWt); }
 if (lp_sigma > 0) { //free low-pass reusables (original comment said 'high-pass' — copy/paste)
  _mm_free (lp);
  _mm_free (lpStart);
  _mm_free (lpEnd);
  _mm_free (lpSumWt);
 }
 return 0;
} // nifti_bptf()

//subtract each voxel's temporal mean, in place; requires a 4D dataset
// returns 0 on success, 1 on unsupported input
static int nifti_demean(nifti_image * nim) {
 if (nim->datatype != DT_CALC) return 1;
 size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
 if (nvox3D < 1) return 1;
 int nvol = nim->nvox / nvox3D;
 if ((nvox3D * nvol) != nim->nvox) return 1;
 if (nvol < 1) {
  fprintf(stderr,"demean requires 4D datasets\n");
  return 1;
 }
 flt * img = (flt *) nim->data;
 //voxels are independent: parallelize across the 3D volume
 #pragma omp parallel for
 for (size_t i = 0; i < nvox3D; i++) {
  double sum = 0.0;
  for (size_t v = i; v < nim->nvox; v+= nvox3D)
   sum += img[v];
  double mean = sum / nvol;
  for (size_t v = i; v < nim->nvox; v+= nvox3D)
   img[v] -= mean;
 }
 return 0;
}

//collapse dimension 'dim' (1..4) of the image with reduction 'op'
// 'percentage' is consulted only for the Tperc operation
static int nifti_dim_reduce(nifti_image * nim, enum eDimReduceOp op, int dim, int percentage) {
 //e.g. nifti_dim_reduce(nim, Tmean, 4) reduces 4th dimension, saving mean
 int nReduce = nim->dim[dim];
 if ((nReduce <= 1) || (dim < 1) || (dim > 4)) return 0; //nothing to reduce, fslmaths does not generate an error
 if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1)) return 1;
 //size_t nvox3D = nim->nx * nim->ny * nim->nz;
 //int nvol = nim->nvox / nvox3D;
 //if ((nvox3D * nvol) != nim->nvox) return 1;
 if (nim->datatype != DT_CALC) return 1;
 if (nim->dim[0] > 4)
  fprintf(stderr,"dimension reduction collapsing %"PRId64"D into to 4D\n", nim->dim[0]);
 int dims[8], indims[8];
 for (int i = 0; i < 4; i++ )
  dims[i] = MAX(nim->dim[i],1);
 //XYZT limits to 4 dimensions, so collapse dims [4,5,6,7]
 dims[4] = nim->nvox / (dims[1]*dims[2]*dims[3]);
 for (int i = 5; i < 8; i++ )
  dims[i] = 1;
 //indims preserves the INPUT geometry; dims will become the output geometry
 for (int i = 0; i < 8; i++ )
  indims[i] = dims[i];
 if ((dims[1]*dims[2]*dims[3]*dims[4]) != nim->nvox) return 1; //e.g. data in dim 5..7!
dims[dim] = 1; if (dim == 4) dims[0] = 3; //reduce 4D to 3D size_t nvox = dims[1]*dims[2]*dims[3]*dims[4]; flt * i32 = (flt *) nim->data; void * dat = (void *)calloc(1,nim->nvox * sizeof(flt)) ; flt * o32 = (flt *) dat; int collapseStep; //e.g. if we collapse 4th dimension, we will collapse across voxels separated by X*Y*Z if (dim == 1) collapseStep = 1; //collapse by columns else if (dim == 2) collapseStep = indims[1]; //collapse by rows else if (dim == 3) collapseStep = indims[1]*indims[2]; //collapse by slices else collapseStep = indims[1]*indims[2]*indims[3]; //collapse by volumes int xy = dims[1]*dims[2]; int xyz = xy * dims[3]; if ((op == Tmedian) || (op == Tstd) || (op == Tperc) || (op == Tar1)) { //for even number of items, two options for median, consider 4 volumes ranked // meam of 2nd and 3rd: problem one can return values not in data // 2nd value. Representative //here we use the latter approach //int itm = ((nReduce-1) * 0.5); int itm = (nReduce * 0.5); //seems correct tested with odd and even number of volumes if (op == Tperc) { double frac = ((double)percentage)/100.0; //itm = ((nReduce-1) * frac); itm = ((nReduce) * frac); itm = MAX(itm, 0); itm = MIN(itm, nReduce-1); } #pragma omp parallel for for (size_t i = 0; i < nvox; i++ ) { flt * vxls = (flt *)_mm_malloc((nReduce)*sizeof(flt), 64); size_t inPos = i; if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP int T = (i / xyz); //volume int r = i % (xyz); int Z = (r / xy); //slice r = r % (xy); int Y = (r / dims[1]); //row int X = r % dims[1]; inPos = X+(Y*indims[1])+(Z*indims[1]*indims[2])+(T*indims[1]*indims[2]*indims[3]); } for (int v = 0; v < nReduce; v++ ) { vxls[v] = i32[inPos]; inPos += collapseStep; } if ((op == Tstd) || (op == Tar1)) { //computed in cache, far fewer operations than Welford //note 64-bit double precision even if 32-bit DT_CALC //neither precision gives identical results // double precision attenuates catastrophic cancellation double sum 
= 0.0; for (int v = 0; v < nReduce; v++ ) sum += vxls[v]; double mean = sum / nReduce; double sumSqr = 0.0; for (int v = 0; v < nReduce; v++ ) sumSqr += sqr(vxls[v]- mean); if (op == Tstd) o32[i] = sqrt(sumSqr / (nReduce - 1)); else { //Tar1 if (sumSqr == 0.0) { o32[i] = 0.0; continue; } for (int v = 0; v < nReduce; v++ ) vxls[v] = vxls[v] - mean; //demean double r = 0.0; for (int v = 1; v < nReduce; v++ ) r += (vxls[v] * vxls[v-1])/sumSqr; o32[i] = r; } } else { //Tperc or Tmedian qsort (vxls, nReduce, sizeof(flt), compare); o32[i] = vxls[itm]; } _mm_free (vxls); } //for i: each voxel } else { #pragma omp parallel for for (size_t i = 0; i < nvox; i++ ) { size_t inPos = i; //ok if dim==4 if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP int T = (i / xyz); //volume int r = i % (xyz); int Z = (r / xy); //slice r = r % (xy); int Y = (r / dims[1]); //row int X = r % dims[1]; inPos = X+(Y*indims[1])+(Z*indims[1]*indims[2])+(T*indims[1]*indims[2]*indims[3]); } double sum = 0.0; flt mx = i32[inPos]; flt mn = mx; int mxn = 0; //flt sd = 0.0; //flt mean = 0.0; for (int v = 0; v < nReduce; v++ ) { flt f = i32[inPos]; sum += f; if (f > mx) { mx = f; mxn = v; } mn = MIN(mn, f); //Welford https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // 2-pass method faster //flt delta = f - mean; //mean = mean + delta / (v+1); //sd = sd + delta*(f- mean); inPos += collapseStep; } if (op == Tmean) o32[i] = sum / nReduce; //mean else if (op == Tmax) o32[i] = mx; //max else if (op == Tmaxn) o32[i] = mxn; //maxn else if (op == Tmin) o32[i] = mn; //min } } //if opel nim->nvox = nvox; for (int i = 0; i < 4; i++ ) nim->dim[i] = dims[i]; nim->ndim = dims[0]; nim->nx = dims[1]; nim->ny = dims[2]; nim->nz = dims[3]; nim->nt = dims[4]; nim->nu = dims[5]; nim->nv = dims[6]; nim->nw = dims[7]; free(nim->data); nim->data = dat; return 0; } //Tar1 enum eOp{unknown, add, sub, mul, divX, rem, mod, mas, thr, thrp, thrP, uthr, uthrp, uthrP, max, min, 
power, seed, inm, ing, smth, exp1,log1,sin1,cos1,tan1,asin1,acos1,atan1,sqr1,sqrt1,recip1,abs1,bin1,binv1,edge1, index1, nan1, nanm1, rand1, randn1,range1, rank1, ranknorm1, pval1, pval01, cpval1, ztop1, ptoz1, dilMk,dilDk,dilFk,dilallk,erok,eroFk,fmediank,fmeank,fmeanuk, subsamp2,subsamp2offc }; static int * make_kernel_gauss(nifti_image * nim, int * nkernel, double sigmamm) { sigmamm = fabs(sigmamm); if (sigmamm == 0.0) return NULL; double mmCutoff = sigmamm * 6.0; //maximum extent int x = (2*floor(mmCutoff/nim->dx))+1; int y = (2*floor(mmCutoff/nim->dy))+1; int z = (2*floor(mmCutoff/nim->dz))+1; int xlo = (int)(-x / 2); int ylo = (int)(-y / 2); int zlo = (int)(-z / 2); //betterthanfsl // fsl computes gaussian for all values in cube // from first principles, a spherical filter has less bias // since weighting is very low at these edge voxels, it has little impact on // "-fmean", however with other filters like "dilM", fsl's solution works like // a "box" filter, not a "sphere" filter // default is to clone fsl #ifdef betterthanfsl //true sphere at cutouff //first pass: determine number of surviving voxels (n) int n = 0; for (int zi = zlo; zi < (zlo+z); zi++ ) for (int yi = ylo; yi < (ylo+y); yi++ ) for (int xi = xlo; xi < (xlo+x); xi++ ) { flt dx = (xi * nim->dx); flt dy = (yi * nim->dy); flt dz = (zi * nim->dz); flt dist = sqrt(dx*dx + dy*dy + dz*dz); if (dist > mmCutoff) continue; n++; } *nkernel = n; int kernelWeight = (int)((double)INT_MAX/(double)n); //requires <limits.h> int * kernel = (int *)_mm_malloc((n*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight double * wt = (double *)_mm_malloc((n)*sizeof(double), 64); //precess weight: temporary //second pass: fill surviving voxels int i = 0; double expd = 2.0*sigmamm*sigmamm; for (int zi = zlo; zi < (zlo+z); zi++ ) for (int yi = ylo; yi < (ylo+y); yi++ ) for (int xi = xlo; xi < (xlo+x); xi++ ) { flt dx = (xi * nim->dx); flt dy = (yi * nim->dy); flt dz = (zi * nim->dz); flt dist = sqrt(dx*dx + dy*dy + 
dz*dz); if (dist > mmCutoff) continue; kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny); kernel[i+n] = xi; //left-right wrap detection kernel[i+n+n] = yi; //anterior-posterior wrap detection //kernel[i+n+n+n] = kernelWeight; //kernel height wt[i] = exp(-1.0*(dist*dist)/expd); i++; } #else int n = x * y * z; *nkernel = n; int * kernel = (int *)_mm_malloc((n*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight double * wt = (double *)_mm_malloc((n)*sizeof(double), 64); //precess weight: temporary int i = 0; double expd = 2.0*sigmamm*sigmamm; for (int zi = zlo; zi < (zlo+z); zi++ ) for (int yi = ylo; yi < (ylo+y); yi++ ) for (int xi = xlo; xi < (xlo+x); xi++ ) { flt dx = (xi * nim->dx); flt dy = (yi * nim->dy); flt dz = (zi * nim->dz); flt dist = sqrt(dx*dx + dy*dy + dz*dz); //if (dist > mmCutoff) continue; //<- fsl fills all kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny); kernel[i+n] = xi; //left-right wrap detection kernel[i+n+n] = yi; //anterior-posterior wrap detection //kernel[i+n+n+n] = kernelWeight; //kernel height wt[i] = exp(-1.0*(dist*dist)/expd); i++; } #endif double sum = 0.0; for (int i = 0; i < n; i++ ) sum += wt[i]; //sum of entire gaussian is 1 double scale = 1.0 / sum; scale *= (double)INT_MAX; //we use integer scaling: in future faster to typecast integer as flt (if int=32bit) or double (if int=64bit) for (int i = 0; i < n; i++ ) kernel[i+n+n+n] = wt[i]*scale; _mm_free (wt); return kernel; } //make_kernel_gauss() static flt calmax(nifti_image * nim){ if ((nim->nvox < 1) || (nim->datatype != DT_CALC)) return 0.0; flt * in32 = (flt *) nim->data; flt mx = in32[0]; for (size_t i = 0; i < nim->nvox; i++ ) mx = MAX(mx, in32[i]); return mx; } static flt calmin(nifti_image * nim){ if ((nim->nvox < 1) || (nim->datatype != DT_CALC)) return 0.0; flt * in32 = (flt *) nim->data; flt mn = in32[0]; for (size_t i = 0; i < nim->nvox; i++ ) mn = MIN(mn, in32[i]); return mn; } /*void swapSign(nifti_image * nim){ if ((nim->nvox < 1) || 
(nim->datatype != DT_CALC)) return; flt * in32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) in32[i] = -in32[i]; }*/

// Convert a 6-volume symmetric tensor between lower- and upper-triangle volume order
// by swapping volumes 3 and 4, and set the matching NIfTI header dimensions.
//  lower order: xx xy yy xz yz zz (NIfTI NIFTI_INTENT_SYMMATRIX; AFNI/Camino/ANTS)
//  upper order: xx xy xz yy yz zz (FSL dtifit convention)
// Returns 0 on success, 1 on unsupported input.
static int nifti_tensor_2(nifti_image * nim, int lower2upper) {
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	if ((nim->datatype != DT_CALC) || (nvox3D < 1))
		return 1;
	int nVol = (int)(nim->nvox / nvox3D);
	if (nVol != 6) {
		fprintf(stderr, "nifti_tensor_2: input must have precisely 6 volumes (not %d)\n", nVol);
		return 1;
	}
	//3dAFNItoNIFTI does not set intent_code to NIFTI_INTENT_SYMMATRIX, so check dimensions
	if ((lower2upper) && (nim->dim[4] == 6))
		fprintf(stderr, "nifti_tensor_2: check images (header suggests already in upper triangle format)\n");
	if ((!lower2upper) && (nim->dim[4] == 6))
		fprintf(stderr, "nifti_tensor_2: check images (header suggests already in lower triangle format)\n");
	//lower xx xy yy xz yz zz
	//upper xx xy xz yy yz zz
	//swap volumes 3 and 4 (only these two differ between the orderings)
	flt * in32 = (flt *)nim->data;
	flt * tmp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
	flt * v3 = in32 + (2 * nvox3D);
	flt * v4 = in32 + (3 * nvox3D);
	memcpy(tmp, v4, nvox3D * sizeof(flt));
	memcpy(v4, v3, nvox3D * sizeof(flt));
	memcpy(v3, tmp, nvox3D * sizeof(flt));
	_mm_free(tmp);
	if (lower2upper) { //FSL uses non-standard upper triangle: plain 4D, 6 timepoints
		nim->dim[0] = 4;
		for (int i = 4; i < 8; i++)
			nim->dim[i] = 1;
		nim->dim[4] = 6;
		nim->ndim = 4;
		nim->nt = 6;
		nim->nu = 1;
		nim->nv = 1;
		nim->nw = 1;
	} else { //upper2lower
		//lower is NIfTI default, used by AFNI, Camino, ANTS
		nim->intent_code = NIFTI_INTENT_SYMMATRIX;
		/*! To store an NxN symmetric matrix at each voxel:
		    - dataset must have a 5th dimension
		    - intent_code must be NIFTI_INTENT_SYMMATRIX
		    - dim[5] must be N*(N+1)/2
		    - intent_p1 must be N (in float format)
		    - the matrix values A[i][[j] are stored in row-order:
		      - A[0][0]
		      - A[1][0] A[1][1]
		      - A[2][0] A[2][1] A[2][2]
		      - etc.: row-by-row */
		nim->dim[0] = 5;
		for (int i = 4; i < 8; i++)
			nim->dim[i] = 1;
		nim->dim[5] = 6;
		nim->ndim = 5;
		nim->nt = 1;
		nim->nu = 6;
		nim->nv = 1;
		nim->nw = 1;
	}
	return 0;
}

// Decompose a 6-volume diffusion tensor into eigenvalues/eigenvectors and scalar maps,
// writing "_V1","_V2","_V3" (4D, 3 volumes each) and "_L1","_L2","_L3","_MD","_MO","_FA"
// (3D) images via nifti_save; FA remains in nim->data on return.
// Returns 0 on success, 1 on failure or when built without tensor_decomp support.
static int nifti_tensor_decomp(nifti_image * nim, int isUpperTriangle) {
	// MD= (Dxx+Dyy+Dzz)/3
	//https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
	// dtifit produces upper-triangular order: xx xy xz yy yz zz
	//MD = 1/3*(Dxx+Dyy+Dzz)
	//FA= sqrt(3/2)*sqrt(((Dx-MD)^2+(Dy-MD)^2+(Dz-MD^2))/(Dx^2+Dy^2+Dz^2))
	//fslmaths tensor.nii -tensor_decomp bork.nii
	// 3dDTeig -uddata -sep_dsets -prefix AFNIdwi.nii tensor.nii
	//3dDTeig expects LOWER diagonal order unless -uddata
	// Dxx,Dxy,Dyy,Dxz,Dyz,Dzz
	// https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDTeig.html
	//dxx, dxy, dyy, dxz, dyz, dzz
	// 3dDTeig -uddata -prefix AFNIdwi.nii tensor.nii
	// fslmaths tensor.nii -tensor_decomp bork.nii
	// Creates 5*3D and 3*4D files for a total of 14 volumes L1,L2,L3,V1(3),V2(3),V3(3),FA,MD
#ifdef tensor_decomp
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nVol = (int)(nim->nvox / nvox3D);
	if (nVol != 6) {
		fprintf(stderr, "nifti_tensor_decomp: input must have precisely 6 volumes (not %d)\n", nVol);
		return 1;
	}
	flt * in32 = (flt *)nim->data;
	//detect if data is upper or lower triangle
	// The "YY" component should be brighter (stronlgy positive) than the off axis XZ
#define detectUpperOrLower
#ifdef detectUpperOrLower
	double sumV3 = 0.0; //3rd volume, YY for lower, XZ for upper
	double sumV4 = 0.0; //4th volume, XZ for lower, YY for upper
	flt * v32 = in32 + (nvox3D * 2); //offset to 3rd volume
	for (size_t i = 0; i < nvox3D; i++)
		sumV3 += v32[i];
	v32 = in32 + (nvox3D * 3); //offset to 4th volume
	for (size_t i = 0; i < nvox3D; i++)
		sumV4 += v32[i];
	if ((sumV4 > sumV3) && (!isUpperTriangle))
		fprintf(stderr, "nifti_tensor_decomp: check results, input looks like UPPER triangle.\n");
	if ((sumV4 < sumV3) && (isUpperTriangle))
		fprintf(stderr, "nifti_tensor_decomp: check results, input looks like LOWER triangle.\n");
#endif
	flt * out32 = (flt *)_mm_malloc(14 * nvox3D * sizeof(flt), 64);
	for (size_t i = 0; i < nvox3D; i++) {
		//n.b. in6 and out14 are ALWAYS float regradless of DT32, e.g. single even if DT=double
		// NOTE(review): in6/out14 are re-allocated for every voxel; hoisting these two
		// _mm_malloc/_mm_free pairs out of the loop would save O(nvox3D) allocations — TODO confirm
		float * in6 = (float *)_mm_malloc(6 * sizeof(float), 64);
		float * out14 = (float *)_mm_malloc(14 * sizeof(float), 64);
		size_t iv = i;
		for (int v = 0; v < 6; v++) { //gather tensor components for this voxel
			in6[v] = in32[iv];
			iv += nvox3D;
		}
		EIG_tsfunc(0.0, 0.0, 0, in6, 0.0, 0.0, NULL, 0, out14, isUpperTriangle);
		size_t ov = i;
		for (int v = 0; v < 14; v++) { //scatter eigen results back as 14 volumes
			out32[ov] = out14[v];
			ov += nvox3D;
		}
		_mm_free(out14);
		_mm_free(in6);
	}
	free(nim->data);
	// Creates 5*3D and 3*4D files for a total of 14 volumes L1(0),L2(1),L3(2),V1(3,4,5),V2(6,7,8),V3(9,10,11),FA(12),MD(13)
	flt * outv;
	//save 4D images (eigenvectors: 3 volumes each)
	nim->cal_min = -1;
	nim->cal_max = 1;
	nim->nvox = nvox3D * 3;
	nim->ndim = 4;
	nim->nt = 3;
	nim->nu = 1;
	nim->nv = 1;
	nim->nw = 1;
	nim->dim[0] = 4;
	nim->dim[4] = 3;
	for (int i = 5; i < 8; i++)
		nim->dim[i] = 1;
	//void * dat = (void *)calloc(1, 3*nvox3D * sizeof(flt)) ;
	//nim->data = dat;
	//flt * fa32 = (flt *) dat;
	//save V1
	outv = out32 + (nvox3D * 3);
	//memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
	/*for (size_t i = 0; i < (3*nvox3D); i++ )
		if (outv[i] != 0.0) // do not create "-0.0"
			outv[i] = -outv[i]; */
	nim->data = (void *)outv;
	nifti_save(nim, "_V1");
	//save V2
	outv = out32 + (nvox3D * 6);
	//memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nifti_save(nim, "_V2");
	//save V3
	outv = out32 + (nvox3D * 9);
	//memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nifti_save(nim, "_V3");
	//release 4D memory
	//free(dat);
	//save 3D images (eigenvalues and scalar maps)
	nim->cal_min = 0;
	nim->cal_max = 0;
	nim->nvox = nvox3D * 1;
	nim->ndim = 3;
	nim->nt = 1;
	nim->dim[0] = 3;
	nim->dim[4] = 1;
	//save L1
	outv = out32;
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L1");
	//save L2
	outv = out32 + (nvox3D * 1);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L2");
	//save L3
	outv = out32 + (nvox3D * 2);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L3");
	//save MD
	outv = out32 + (nvox3D * 13);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_min = calmin(nim);
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_MD");
	//single volume data
	void * dat = (void *)calloc(1, nvox3D * sizeof(flt));
	nim->data = dat;
	flt * fa32 = (flt *)dat;
	//save MO
	//MODE https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;4fbed3d1.1103
	// compute MO (MODE) from L1, L2, L3, MD
	//e1=l1-MD, e2=l2-MD, e3=l3-MD;
	//n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3);
	//d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3);
	//d = 2*d*d*d;
	//mode = n/d;
	//something is wrong with this formula.
	// a. Ennis 2006 includes a sqrt that can not be factored out
	// b. results differ from fslmaths
	nim->cal_min = -1;
	nim->cal_max = 1;
	flt * L1 = out32;
	flt * L2 = out32 + (nvox3D * 1);
	flt * L3 = out32 + (nvox3D * 2);
	flt * MD = out32 + (nvox3D * 13);
	for (size_t i = 0; i < nvox3D; i++) {
		flt e1 = L1[i] - MD[i];
		flt e2 = L2[i] - MD[i];
		flt e3 = L3[i] - MD[i];
		flt n = (e1 + e2 - 2 * e3) * (2 * e1 - e2 - e3) * (e1 - 2 * e2 + e3);
		flt d = (e1 * e1 + e2 * e2 + e3 * e3 - e1 * e2 - e2 * e3 - e1 * e3);
		d = sqrt(d); //Correlation r = 0.999746
		d = 2 * d * d * d;
		//d = sqrt(d); //Correlation r = 0.990319
		if (d != 0)
			d = n / d; //mode
		d = MIN(d, 1.0); //clamp MO to [-1, 1]
		d = MAX(d, -1.0);
		fa32[i] = d;
	}
	nifti_save(nim, "_MO");
	//save FA
	outv = out32 + (nvox3D * 12);
	memcpy(fa32, outv, nvox3D * sizeof(flt));
	nim->cal_min = 0;
	nim->cal_max = 1;
	nifti_save(nim, "_FA");
	//keep FA in memory
	nim->cal_max = 0;
	_mm_free(out32);
	return 0;
#else
	fprintf(stderr, "not compiled to support tensor_decomp\n");
	return 1;
#endif
} //nifti_tensor_decomp()

// Repeatedly apply a mean-of-nonzero dilation ("-dilM") to volume 'vol' of nim
// until no zero voxels remain (fslmaths "-dilall").
static void kernel3D_dilall(nifti_image * nim, int * kernel, int nkernel, int vol) {
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	flt * f32 = (flt *)nim->data;
	f32 += (nVox3D * vol); //operate on one 3D volume of a possibly 4D image
	flt * inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64);
	memcpy(inf32, f32, nVox3D * sizeof(flt)); //snapshot: read previous pass, write current
	int nxy = nim->nx * nim->ny;
	size_t nZero = 1;
	while (nZero > 0) { //iterate until a pass finds no zero voxels
		nZero = 0;
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue; //only fill zero voxels
					int nNot0 = 0;
					flt sum = 0.0f;
					for (size_t k = 0; k < nkernel; k++) {
						// NOTE(review): vx is size_t, so (vx < 0) is always false; a negative
						// i+kernel[k] wraps to a huge value and is caught by (vx >= nVox3D)
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						nNot0++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0)
						f32[i] = sum / nNot0; //mean of nonzero neighbors
					nZero++;
				} //for x
			} //for y
		} //for z
		memcpy(inf32, f32, 
nVox3D*sizeof(flt));
		//printf("n=0: %zu\n", nZero);
	} //nZero > 0
	_mm_free(inf32);
} //kernel3D_dilall()

// Apply one spatial kernel filter 'op' to 3D volume 'vol' of nim.
// kernel holds 4 planes of nkernel ints: voxel offset, x displacement, y displacement,
// and an integer weight (scaled to INT_MAX); x/y displacements detect row/column wrap.
// Returns 0 on success, 1 for an unsupported op.
static int kernel3D(nifti_image * nim, enum eOp op, int * kernel, int nkernel, int vol) {
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	flt * f32 = (flt *)nim->data;
	f32 += (nVox3D * vol); //operate on one 3D volume
	flt * inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64);
	memcpy(inf32, f32, nVox3D * sizeof(flt)); //read pristine input, write in place
	int nxy = nim->nx * nim->ny;
	if (op == fmediank) { //median of in-bounds kernel voxels
		flt * vxls = (flt *)_mm_malloc((nkernel) * sizeof(flt), 64);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					int nOK = 0;
					for (size_t k = 0; k < nkernel; k++) {
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						vxls[nOK] = inf32[vx];
						nOK++;
					} //for k
					qsort(vxls, nOK, sizeof(flt), compare);
					int itm = (nOK * 0.5);
					f32[i] = vxls[itm];
				} //for x
			} //for y
		} //for z
		_mm_free(vxls);
	} else if (op == dilMk) { //zero voxels get the mean of nonzero kernel neighbors
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue;
					int nNot0 = 0;
					flt sum = 0.0f;
					for (size_t k = 0; k < nkernel; k++) {
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						nNot0++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0)
						f32[i] = sum / nNot0;
				} //for x
			} //for y
		} //for z
	} else if (op == dilDk) { //maximum - fslmaths 6.0.1 emulation, note really MODE, max non-zero
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue;
					//flt mx = -INFINITY;
					flt mx = NAN; //NAN marks "no nonzero neighbor found"
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						flt v = inf32[vx];
						if (v == 0.0)
							continue;
						mx = MAX(mx, inf32[vx]); //with dilD a input voxel of 0
					} //for k
					//https://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
					// f != f will be true only if f is NaN
					if (!(mx != mx))
						f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilFk) { //maximum - fslmaths 6.0.1 appears to use "dilF" when the user requests "dilD"
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt mx = f32[i]; //maximum filter over ALL voxels (seed with self)
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] <= mx))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						mx = MAX(mx, inf32[vx]);
						//if (mx < 0) continue; //with dilF, do not make a zero voxel darker than 0
					} //for k
					f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilallk) { // -dilall : Apply -dilM repeatedly until the entire FOV is covered");
		kernel3D_dilall(nim, kernel, nkernel, vol);
	} else if (op == eroFk) { //Minimum filtering of all voxels
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						f32[i] = MIN(f32[i], inf32[vx]);
					} //for k
				} //for x
			} //for y
		} //for z
	} else if (op == fmeank) { //weighted mean, normalized by in-bounds weight
		flt * kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++) //decode integer weights back to [0,1]
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						wt += kwt[k];
					} //for k
					f32[i] = sum / wt;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == fmeanuk) { //weighted mean, un-normalized (raw weighted sum)
		flt * kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					//flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						//wt += kwt[k];
					} //for k
					//f32[i] = sum / wt;
					f32[i] = sum;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == erok) { //erode: zero any nonzero voxel touching a zero neighbor
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] == 0.0)
						continue;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						f32[i] = 0.0;
					} //for k
				} //for x
			} //for y
		} //for z
	} else {
		fprintf(stderr, "kernel3D: Unsupported operation\n");
		_mm_free(inf32);
		return 1;
	}
	_mm_free(inf32);
	return 0;
} //kernel3D

// Apply kernel op to every 3D volume of a (possibly 4D) DT_CALC image.
// Returns 0 on success, nonzero on the first failing volume.
static int nifti_kernel(nifti_image * nim, enum eOp op, int * kernel, int nkernel) {
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nVol = (int)(nim->nvox / nVox3D);
	if (nVol < 1)
		return 1;
	if ((nkernel < 1) || (kernel == NULL))
		return 1;
	for (int v = 0; v < nVol; v++) {
		int ok = kernel3D(nim, op, kernel, nkernel, v);
		if (ok != 0)
			return ok;
	}
	return 0;
}

// Zero every voxel outside the [min, min+size-1] box in x/y/z/t (fslmaths "-roi").
static int nifti_roi(nifti_image * nim, int xmin, int xsize, int ymin, int ysize, int zmin, int zsize, int tmin, int tsize) {
	// "fslmaths LAS -roi 3 32 0 40 0 40 0 5 f "
	int nt = nim->nvox / (nim->nx * nim->ny * nim->nz);
	if ((nim->nvox < 1) || (nt < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt * f32 = (flt *)nim->data;
	//if (neg_determ(nim))
	// do something profound;
	//determinants do not seem to influence "-roi"? 
int xmax = xmin + xsize - 1;
	int ymax = ymin + ysize - 1;
	int zmax = zmin + zsize - 1;
	int tmax = tmin + tsize - 1;
	//printf("%d..%d", zmin, zmax);
	size_t i = 0;
	for (int t = 0; t < nt; t++) {
		int tOK = 1;
		if ((t < tmin) || (t > tmax))
			tOK = 0;
		for (int z = 0; z < nim->nz; z++) {
			int zOK = 1;
			if ((z < zmin) || (z > zmax))
				zOK = 0;
			for (int y = 0; y < nim->ny; y++) {
				int yOK = 1;
				if ((y < ymin) || (y > ymax))
					yOK = 0;
				for (int x = 0; x < nim->nx; x++) {
					int xOK = 1;
					if ((x < xmin) || (x > xmax))
						xOK = 0;
					if ((xOK == 0) || (yOK == 0) || (zOK == 0) || (tOK == 0))
						f32[i] = 0.0; //outside the box in any dimension: clear
					i++;
				} //x
			} //y
		} //z
	} //t
	return 0;
}

// Sobel gradient magnitude filter: replaces each voxel with sqrt(gx^2+gy^2+gz^2).
// NOTE(review): the 'offc' parameter is unused in this function body — TODO confirm intent.
static int nifti_sobel(nifti_image * nim, int offc) {
	//sobel is simply one kernel pass per dimension.
	// this could be achieved with successive passes of "-kernel"
	// here it is done in a single pass for cache efficiency
	// https://en.wikipedia.org/wiki/Sobel_operator
	int vox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if (nim->datatype != DT_CALC)
		return 1;
	int nvol = nim->nvox / vox3D;
	int numk = 6; //center voxel and all its neighbors
	//each kernel stores 4 planes of numk ints: offset, xpos, ypos, weight
	int * kx = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int * ky = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int * kz = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int i = 0;
	for (int x = 0; x <= 1; x++)
		for (int y = -1; y <= 1; y++) {
			int sgn = (2 * x) - 1; //-1 or +1
			int weight = sgn * (2 - abs(y)); //Sobel weights: +-2 center, +-1 diagonals
			//kx compare left and right
			kx[i + numk] = (2 * x) - 1; //left/right wrap
			kx[i + numk + numk] = y; //anterior/posterior wrap
			kx[i] = kx[i + numk] + (kx[i + numk + numk] * (nim->nx)); //voxel offset
			kx[i + numk + numk + numk] = weight; //weight
			//ky compare anterior and posterior
			ky[i + numk] = y; //left/right wrap
			ky[i + numk + numk] = (2 * x) - 1; //anterior/posterior wrap
			ky[i] = ky[i + numk] + (ky[i + numk + numk] * (nim->nx)); //voxel offset
			ky[i + numk + numk + numk] = weight; //weight
			//kz superior/inferior
			kz[i + numk] = y; //left/right wrap
			kz[i + numk + numk] = 0; //anterior/posterior wrap
			kz[i] = y + (((2 * x) - 1) * nim->nx * nim->ny); //voxel offset
			kz[i + numk + numk + numk] = weight; //weight
			//printf("x%d y%d wt%d\n", kx[i+numk], kx[i+numk+numk], kx[i+numk+numk+numk]);
			//printf("x%d y%d wt%d\n", ky[i+numk], ky[i+numk+numk], ky[i+numk+numk+numk]);
			i++;
		} //for y
	flt * i32 = (flt *)nim->data; //input volumes
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt * iv32 = i32 + (v * vox3D);
		flt * imgin = _mm_malloc(vox3D * sizeof(flt), 64); //input values prior to blur
		memcpy(imgin, iv32, vox3D * sizeof(flt));
		int i = 0;
		for (int z = 0; z < nim->nz; z++)
			for (int y = 0; y < nim->ny; y++)
				for (size_t x = 0; x < nim->nx; x++) {
					//compute x gradient
					flt gx = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + kx[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + kx[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kx[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gx += imgin[vx] * kx[k + numk + numk + numk];
					} //for k
					//compute y gradient
					flt gy = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + ky[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + ky[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + ky[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gy += imgin[vx] * ky[k + numk + numk + numk];
					} //for k
					//compute z gradient
					flt gz = 0.0f; //always 0 for 2D, we could add conditional to skip but optimize for 3D
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + kz[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + kz[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kz[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gz += imgin[vx] * kz[k + numk + numk + numk];
					} //for k
					iv32[i] = sqrt(sqr(gx) + sqr(gy) + sqr(gz));
					i++;
				} //for x
		_mm_free(imgin);
	}
	_mm_free(kx);
	_mm_free(ky);
	_mm_free(kz);
	return 0;
} //nifti_sobel()

// Halve image resolution (fslmaths "-subsamp2"/"-subsamp2offc").
// offc != 0: offset-center variant (simple 2x2x2 binning with edge weights);
// offc == 0: 27-neighbor weighted kernel. No anti-aliasing, mimicking fslmaths.
static int nifti_subsamp2(nifti_image * nim, int offc) {
	//naive downsampling: this is provided purely to mimic the behavior of fslmaths
	// see https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/s0nw827nc4kcnaa/Aliasing.ipynb
	// no anti-aliasing filter https://en.wikipedia.org/wiki/Image_scaling
	int invox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int indim[5];
	for (int i = 1; i < 5; i++)
		indim[i] = MAX(nim->dim[i], 1);
	int nvol = nim->nvox / invox3D;
	int x_odd = indim[1] % 2;
	if ((nim->nvox < 1) || (nvol < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nx = ceil(nim->nx * 0.5);
	int ny = ceil(nim->ny * 0.5);
	int nz = ceil(nim->nz * 0.5);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
		return 0; //nothing to reduce
	int nvox3D = nx * ny * nz;
	flt * i32 = (flt *)nim->data;
	void * dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
	flt * o32 = (flt *)dat;
	int x_flip = 0;
	if (!neg_determ(nim))
		x_flip = 1;
	if (offc) { //subsamp2offc: accumulate each input voxel into its output bin
		int * wt = _mm_malloc(nvox3D * nvol * sizeof(int), 64); //weight, just for edges
		for (int i = 0; i < (nvox3D * nvol); i++) {
			wt[i] = 0;
			o32[i] = 0.0;
		}
		int boost = 0;
		if ((x_odd) && (x_flip))
			boost = 1;
		size_t i = 0;
		for (int v = 0; v < indim[4]; v++) {
			size_t vo = v * nvox3D; //volumes do not get reduced
			for (int z = 0; z < indim[3]; z++) {
				size_t zo = vo + ((z / 2) * ny * nx);
				for (int y = 0; y < indim[2]; y++) {
					size_t yo = zo + ((y / 2) * nx);
					for (int x = 0; x < indim[1]; x++) {
						size_t xo = yo + ((x + boost) / 2);
						wt[xo]++;
						o32[xo] += i32[i];
						i++;
					} //x
				} //y
			} //z
		} //vol
		for (int i = 0; i < (nvox3D * nvol); i++)
			if (wt[i] > 0)
				o32[i] /= wt[i];
		_mm_free(wt);
	} else { //if subsamp2offc else subsamp2
		int numk = 27; //center voxel and all its neighbors
		int * kernel = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
		int i = 0;
		for (int z = -1; z <= 1; z++)
			for (int y = -1; y <= 1; y++)
				for (int x = -1; x <= 1; x++) {
					kernel[i] = 
x + (y * indim[1]) + (z * indim[1] * indim[2]);
					kernel[i + numk] = x; //left-right wrap detection
					kernel[i + numk + numk] = y; //anterior-posterior wrap detection
					kernel[i + numk + numk + numk] = 8 / (pow(2, sqr(x) + sqr(y) + sqr(z))); //kernel weight
					i++;
				}
		int boost = 0;
		//if ((xflip == 1) && (odd == 0)) boost = 1;
		if ((x_flip == 1) && (x_odd == 0))
			boost = 1;
		//printf("boost %d\n", boost);
		size_t nvox3Din = indim[1] * indim[2] * indim[3];
		size_t o = 0;
		for (int v = 0; v < nvol; v++) {
			size_t vi = v * nvox3Din;
			for (int z = 0; z < nz; z++) {
				int zi = (2 * z * indim[1] * indim[2]);
				//printf("%zu \n", zi);
				for (int y = 0; y < ny; y++) {
					int yy = y + y; //y*2 input y
					int yi = zi + (yy * indim[1]);
					for (int x = 0; x < nx; x++) {
						//int xx = x+x+xflip; //x*2 input x
						int xx = x + x + boost; //x*2 input x
						int xi = yi + xx;
						//flt sum = 0.0;
						//flt wt = 0.0;
						double sum = 0.0;
						double wt = 0.0;
						for (int k = 0; k < numk; k++) {
							if ((xi + kernel[k]) < 0)
								continue; //position would be less than 0 - outside volume, avoid negative values in size_t
							size_t pos = xi + kernel[k]; //offset
							if (pos >= nvox3Din)
								continue; //position outside volume, e.g. slice above top of volume
							int xin = xx + kernel[k + numk];
							if ((xin < 0) || (xin >= indim[1]))
								continue; //wrap left or right
							int yin = yy + kernel[k + numk + numk];
							if ((yin < 0) || (yin >= indim[2]))
								continue; //wrap anterior or posterior
							flt w = kernel[k + numk + numk + numk];
							wt += w;
							sum += i32[vi + pos] * w;
						}
						//if (wt > 0.0) //no need to check: every voxel has at least one contributor (itself)
						o32[o] = sum / wt;
						//else { // o32[o] = 666.6;
						o++;
					} //x
				} //y
			} //z
		} //vol
		_mm_free(kernel);
	} //if subsamp2offc else subsamp2
	//update header: halved grid, doubled voxel size
	nim->nvox = nvox3D * nvol;
	nim->nx = nx;
	nim->ny = ny;
	nim->nz = nz;
	nim->dim[1] = nx;
	nim->dim[2] = ny;
	nim->dim[3] = nz;
	nim->dx *= 2;
	nim->dy *= 2;
	nim->dz *= 2;
	nim->pixdim[1] *= 2;
	nim->pixdim[2] *= 2;
	nim->pixdim[3] *= 2;
	//adjust origin
	mat44 m = xform(nim);
	vec4 vx = setVec4(0, 0, 0);
	vec4 pos = nifti_vect44mat44_mul(vx, m);
	//vx = setVec4(0.5,0.5,0.5);
	//vx = setVec4(1.0,0.0,0.0);
	if (offc) {
		//printf("%d flip odd %d\n", x_flip, x_odd);
		if ((x_odd) && (x_flip))
			vx = setVec4(-0.5, -0.5, -0.5); //subsamp2offc
		else
			vx = setVec4(0.5, 0.5, 0.5); //subsamp2offc
		//if (!xflip) {
		// vx = setVec4(0.5,0.5,0.5);
		// printf("y\n");
		//}
	} else {
		if (x_odd)
			vx = setVec4(0, 0, 0); //subsamp2
		else
			vx = setVec4(1, 0, 0); //subsamp2
		if (!x_flip)
			vx = setVec4(0, 0, 0);
	}
	vec4 pos1 = nifti_vect44mat44_mul(vx, m);
	vx = setVec4(pos1.v[0] - pos.v[0], pos1.v[1] - pos.v[1], pos1.v[2] - pos.v[2]);
	m.m[0][3] += vx.v[0];
	m.m[1][3] += vx.v[1];
	m.m[2][3] += vx.v[2];
	//scale spatial transform
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			m.m[i][j] *= 2;
	//apply to both sform and qform in case VTK user
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++) {
			nim->sto_xyz.m[i][j] = m.m[i][j];
			nim->qto_xyz.m[i][j] = m.m[i][j];
		}
	free(nim->data);
	nim->data = dat;
	return 0;
}

// Resample every volume by zoom factors zx/zy/zz using separable passes in X, Y then Z
// (weights from createFilter/interp_method). Unlike fslmaths this handles 4D data.
static int nifti_resize(nifti_image * nim, flt zx, flt zy, flt zz, int interp_method) {
	//see AFNI's 3dresample
	//better than fslmaths: fslmaths can not resample 4D data
	// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni.nii -input rest.nii
	// time ./sm rest.nii -subsamp2 out.nii //However, aliasing artifacts
	// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni2.nii -input zoneplate3d_129.nii
	int invox3D = nim->nx * nim->ny * nim->nz;
	int nvol = nim->nvox / invox3D;
	if ((nim->nvox < 1) || (nvol < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nx = ceil(nim->nx * zx);
	int ny = ceil(nim->ny * zy);
	int nz = ceil(nim->nz * zz);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
		return 0; //nothing to resize
	int nvox3D = nx * ny * nz;
	flt * i32 = (flt *)nim->data;
	void * dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
	flt * o32 = (flt *)dat;
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt * iv32 = i32 + (v * invox3D);
		//reduce in X: half the width: 1/2 input file size
		flt * imgx = _mm_malloc(nx * nim->ny * nim->nz * sizeof(flt), 64); //input values prior to blur
		if (nx == nim->nx) //no change in x dimension
			memcpy(imgx, iv32, nx * nim->ny * nim->nz * sizeof(flt));
		else {
			CLIST * contrib = createFilter(nim->nx, nx, interp_method);
			size_t i = 0;
			for (size_t y = 0; y < (nim->ny * nim->nz); y++) {
				for (int x = 0; x < nx; x++) {
					flt weight = 0.0;
					for (int j = 0; j < contrib[x].n; j++)
						weight += iv32[contrib[x].p[j].pixel] * contrib[x].p[j].weight;
					imgx[i++] = weight;
				}
				iv32 += nim->nx;
			} //for y
			for (i = 0; i < nx; i++)
				free(contrib[i].p);
			free(contrib);
		}
		//reduce in Y: half the height: 1/4 input size
		flt * imgy = _mm_malloc(nx * ny * nim->nz * sizeof(flt), 64); //input values prior to blur
		if (ny == nim->ny) //no change in y dimension
			memcpy(imgy, imgx, nx * ny * nim->nz * sizeof(flt));
		else {
			CLIST * contrib = createFilter(nim->ny, ny, interp_method);
			flt * iny = _mm_malloc(nim->ny * sizeof(flt), 64); //input values prior to resize
			for (int z = 0; z < nim->nz; z++) {
				for (int x = 0; x < nx; x++) {
					int yo = (z * nx * ny) + x; //output
					int yi = (z * nx * nim->ny) + x; //input
					for (int j = 0; j < nim->ny; j++) { //gather one input column
						//iny[j] = imgx[yi+(j*nx)];
						iny[j] = imgx[yi];
						yi += nx;
					}
					for (int y = 
0; y < ny; y++) { flt weight = 0.0; for (int j = 0; j < contrib[y].n; j++) weight += iny[contrib[y].p[j].pixel]* contrib[y].p[j].weight; //weight = y; imgy[yo] = weight; yo += nx; } //y } //x } //z _mm_free (iny); for (int i = 0; i < ny; i++) free(contrib[i].p); free(contrib); } _mm_free (imgx); //reduce in Z flt * ov32 = o32 + (v * nvox3D); if (nz == nim->nz) //no change in x dimension memcpy(ov32, imgy, nx*ny*nz*sizeof(flt)); else { CLIST * contrib = createFilter(nim->nz, nz, interp_method); flt * inz = _mm_malloc(nim->nz*sizeof(flt), 64); //input values prior to resize int nxy = nx * ny; for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { int zo = x + (y * nx); //output offset int zi = x + (y * nx); //input offset for (int j = 0; j < nim->nz; j++) { inz[j] = imgy[zi]; zi += nxy; } for (int z = 0; z < nz; z++) { //for (int j = 0; j < nim->nz; j++) // inz[j] = imgy[zi+(j*nx*ny)]; flt weight = 0.0; for (int j = 0; j < contrib[z].n; j++) weight += inz[contrib[z].p[j].pixel]* contrib[z].p[j].weight; //weight = y; ov32[zo] = weight; zo += nx*ny; } //for z } //for x } //for y _mm_free (inz); for (int i = 0; i < nz; i++) free(contrib[i].p); free(contrib); } _mm_free (imgy); } //for v nim->nvox = nvox3D * nvol; nim->nx = nx; nim->ny = ny; nim->nz = nz; nim->dim[1] = nx; nim->dim[2] = ny; nim->dim[3] = nz; nim->dx /= zx; nim->dy /= zy; nim->dz /= zz; nim->pixdim[1] /= zx; nim->pixdim[2] /= zy; nim->pixdim[3] /= zz; //adjust origin - again, just like fslmaths mat44 m = xform(nim); /*vec4 vx = setVec4(0,0,0); vec4 pos = nifti_vect44mat44_mul(vx, m); vx = setVec4(0.5,0.5,0.5); //subsamp2offc vx = setVec4(1,0,0); //subsamp2 vec4 pos1 = nifti_vect44mat44_mul(vx, m); vx = setVec4(pos1.v[0]-pos.v[0], pos1.v[1]-pos.v[1], pos1.v[2]-pos.v[2]); m.m[0][3] += vx.v[0]; m.m[1][3] += vx.v[1]; m.m[2][3] += vx.v[2];*/ m.m[0][0] /= zx; m.m[1][0] /= zx; m.m[2][0] /= zx; m.m[0][1] /= zy; m.m[1][1] /= zy; m.m[2][1] /= zy; m.m[0][2] /= zz; m.m[1][2] /= zz; m.m[2][2] /= zz; for (int i 
= 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user) for (int j = 0; j < 4; j++) { nim->sto_xyz.m[i][j] = m.m[i][j]; nim->qto_xyz.m[i][j] = m.m[i][j]; } free(nim->data); nim->data = dat; return 0; } static int essentiallyEqual(float a, float b) { if (isnan(a) && isnan(b)) return 1; //surprisingly, with C nan != nan return fabs(a - b) <= ( (fabs(a) > fabs(b) ? fabs(b) : fabs(a)) * epsilon); } static void nifti_compare(nifti_image * nim, char * fin) { if (nim->nvox < 1) exit( 1); if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_compare: Unsupported datatype %d\n", nim->datatype); exit( 1); } nifti_image * nim2 = nifti_image_read2(fin, 1); if( !nim2 ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); exit(2); } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz) ) { fprintf(stderr,"** Attempted to process images of different sizes %"PRId64"x%"PRId64"x%"PRId64"vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nim2->nx,nim2->ny,nim2->nz); nifti_image_free( nim2 ); exit(1); } if (nim->nvox != nim2->nvox) { fprintf(stderr," Number of volumes differ\n"); nifti_image_free( nim2 ); exit(1); } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm fprintf(stderr,"WARNING:: Inconsistent orientations for individual images in pipeline! 
(%gmm)\n", max_displacement_mm(nim, nim2)); fprintf(stderr," Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n"); } in_hdr ihdr = set_input_hdr(nim2); if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) { nifti_image_free( nim2 ); exit(1); } flt * img = (flt *) nim->data; flt * img2 = (flt *) nim2->data; size_t differentVox = nim->nvox; double sum = 0.0; double sum2 = 0.0; double maxDiff = 0.0; size_t nNotNan = 0; size_t nDifferent = 0; for (size_t i = 0; i < nim->nvox; i++ ) { if (!essentiallyEqual(img[i], img2[i])) { if (fabs(img[i]-img2[i]) > maxDiff) { differentVox = i; maxDiff = fabs(img[i]-img2[i]); } nDifferent ++; } if (isnan(img[i]) || isnan(img[i]) ) continue; nNotNan++; sum += img[i]; sum2 += img2[i]; } if (differentVox >= nim->nvox) { //fprintf(stderr,"Images essentially equal\n"); */ nifti_image_free( nim2 ); exit(0); } //second pass - one pass correlation is inaccurate or slow nNotNan = MAX(1, nNotNan); flt mn = INFINITY; //do not set to item 1, in case it is nan flt mx = -INFINITY; flt sd = 0.0; flt ave = sum / nNotNan; flt mn2 = INFINITY; flt mx2 = -INFINITY; flt sd2 = 0.0; flt ave2 = sum2 / nNotNan; //for i := 0 to (n - 1) do // sd := sd + sqr(y[i] - mn); //sd := sqrt(sd / (n - 1)); double sumDx = 0.0; for (size_t i = 0; i < nim->nvox; i++ ) { if (isnan(img[i]) || isnan(img[i]) ) continue; mn = MIN(mn, img[i]); mx = MAX(mx, img[i]); sd += sqr(img[i] - ave); mn2 = MIN(mn2, img2[i]); mx2 = MAX(mx2, img2[i]); sd2 += sqr(img2[i] - ave2); sumDx += (img[i] - ave)*(img2[i] - ave2); } double r = 0.0; nNotNan = MAX(2, nNotNan); if (nim->nvox < 2) { sd = 0.0; sd2 = 0.0; } else { sd = sqrt(sd / (nNotNan - 1)); //if (sd != 0.0) sd = 1.0/sd; sd2 = sqrt(sd2 / (nNotNan - 1)); //if (sd2 != 0.0) sd2 = 1.0/sd2; if ((sd * sd2) != 0.0) r = sumDx/(sd*sd2*(nNotNan - 1)); //r = r / (nim->nvox - 1); } r = MIN(r,1.0); r = MAX(r, -1.0); fprintf(stderr,"Images Differ: Correlation r = %g, identical voxels %d%%\n", r, 
(int)floor(100.0*(1.0-(double)nDifferent/(double)nim->nvox))); if (nNotNan < nim->nvox) { fprintf(stderr," %"PRId64" voxels have a NaN in at least one image.\n", nim->nvox - nNotNan); fprintf(stderr," Descriptives consider voxels that are numeric in both images.\n"); } fprintf(stderr," Most different voxel %g vs %g (difference %g)\n", img[differentVox], img2[differentVox], maxDiff); int nvox3D = nim->nx * nim->ny * MAX(nim->nz,1); int nVol = nim->nvox/nvox3D; size_t vx[4]; vx[3] = differentVox/nvox3D; vx[2] = (differentVox / (nim->nx*nim->ny)) % nim->nz; vx[1] = (differentVox / nim->nx) % nim->ny; vx[0] = differentVox % nim->nx; fprintf(stderr," Most different voxel locatoin %zux%zux%zu volume %zu\n", vx[0],vx[1],vx[2], vx[3]); fprintf(stderr,"Image 1 Descriptives\n"); fprintf(stderr," Range: %g..%g Mean %g StDev %g\n", mn, mx, ave, sd); fprintf(stderr,"Image 2 Descriptives\n"); fprintf(stderr," Range: %g..%g Mean %g StDev %g\n", mn2, mx2, ave2, sd2); //V1 comparison - EXIT_SUCCESS if all vectors are parallel (for DWI up vector [1 0 0] has same direction as down [-1 0 0]) if (nVol != 3) { nifti_image_free( nim2 ); exit(1); } int allParallel = 1; //niimath ft_V1 -compare nt_V1 for (size_t i = 0; i < nvox3D; i++ ) { //check angle of two vectors... 
assume unit vectors flt v[3]; //vector, image 1 v[0] = img[i]; v[1] = img[i+nvox3D]; v[2] = img[i+nvox3D+nvox3D]; flt v2[3]; //vector, image 2 v2[0] = img2[i]; v2[1] = img2[i+nvox3D]; v2[2] = img2[i+nvox3D+nvox3D]; flt x[3]; //cross product x[0] = (v[1]*v2[2]) - (v[2]*v2[1]); x[1] = (v[2]*v2[0]) - (v[0]*v2[2]); x[2] = (v[0]*v2[1]) - (v[1]*v2[0]); flt len = sqrt((x[0]*x[0])+(x[1]*x[1])+(x[2]*x[2])); if (len > 0.01) { allParallel = 0; //fprintf(stderr,"[%g %g %g] vs [%g %g %g]\n", v[0],v[1], v[2], v2[0], v2[1], v2[2]); break; } } if ( allParallel ) { fprintf(stderr,"Despite polarity differences, all vectors are parallel.\n"); nifti_image_free( nim2 ); exit(0); } nifti_image_free( nim2 ); exit(1); } //nifti_compare() static int nifti_binary_power ( nifti_image * nim, double v) { //clone operations from ANTS ImageMath: power //https://manpages.debian.org/jessie/ants/ImageMath.1.en.html if (nim->nvox < 1) return 1; if (nim->datatype!= DT_CALC) return 1; flt fv = v; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = pow(f32[i], v); return 0; } static int nifti_binary ( nifti_image * nim, char * fin, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_binary: Unsupported datatype %d\n", nim->datatype); return 1; } nifti_image * nim2 = nifti_image_read2(fin, 1); if( !nim2 ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); return 2; } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz) ) { fprintf(stderr,"** Attempted to process images of different sizes %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nim2->nx,nim2->ny,nim2->nz); nifti_image_free( nim2 ); return 1; } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm fprintf(stderr,"WARNING:: Inconsistent orientations for individual images in pipeline! 
(%gmm)\n", max_displacement_mm(nim, nim2)); fprintf(stderr," Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n"); } in_hdr ihdr = set_input_hdr(nim2); if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) { nifti_image_free( nim2 ); return 1; } flt * imga = (flt *) nim->data; flt * imgb = (flt *) nim2->data; int nvox3D = nim->nx * nim->ny * nim->nz; int nvola = nim->nvox / nvox3D; int nvolb = nim2->nvox / nvox3D; int rem0 = 0; int swap4D = 0; //if 1: input nim was 3D, but nim2 is 4D: output will be 4D if ((nvolb > 1) && (nim->nvox != nim2->nvox) && ((op == uthr) || (op == thr))) { //"niimath 3D -uthr 4D out" only uses 1st volume of 4D, only one volume out nvolb = 1; //fslmaths printf("threshold operation expects 3D mask\n"); //fslmaths makes not modification to image if (op == uthr) //strictly for fslmaths compatibility - makes no sense for (size_t i = 0; i < nim->nvox; i++ ) imga[i] = 0; nifti_image_free( nim2 ); return 0; } else if (nim->nvox != nim2->nvox) { //situation where one input is 3D and the other is 4D if ((nvola != 1) && ((nvolb != 1))) { fprintf(stderr,"nifti_binary: both images must have the same number of volumes, or one must have a single volume (%d and %d)\n", nvola, nvolb); nifti_image_free( nim2 ); return 1; } if (nvola == 1) { imgb = (flt *) nim->data; imga = (flt *) nim2->data; swap4D = 1; nvolb = nim->nvox / nvox3D; nvola = nim2->nvox / nvox3D; } } //make it so imga/novla >= imgb/nvolb for (int v = 0; v < nvola; v++ ) { // int va = v * nvox3D; //start of volume for image A int vb = (v % nvolb) * nvox3D; //start of volume for image B if (op == add) { for (int i = 0; i < nvox3D; i++ ) imga[va+i] += imgb[vb+i]; } else if (op == sub) { if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { imga[va+i] = imgb[vb+i] - imga[va+i]; //printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]); } } else { for (int i = 0; i < nvox3D; i++ ) { //printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], 
imga[vb+i]); imga[va+i] = imga[va+i] - imgb[vb+i]; } } } else if (op == mul) { for (int i = 0; i < nvox3D; i++ ) imga[va+i] *= imgb[vb+i]; } else if (op == max) { for (int i = 0; i < nvox3D; i++ ) imga[va+i] = MAX(imga[va+i], imgb[vb+i]); } else if (op == min) { for (int i = 0; i < nvox3D; i++ ) imga[va+i] = MIN(imga[va+i], imgb[vb+i]); } else if (op == thr) { //thr : use following number to threshold current image (zero anything below the number) for (int i = 0; i < nvox3D; i++ ) if (imga[va+i] < imgb[vb+i]) imga[va+i] = 0; } else if (op == uthr) { //uthr : use following number to upper-threshold current image (zero anything above the number) for (int i = 0; i < nvox3D; i++ ) if (imga[va+i] > imgb[vb+i]) imga[va+i] = 0; } else if (op == mas) { if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { if (imga[va+i] > 0) imga[va+i] = imgb[vb+i]; else imga[va+i] = 0; } } else { for (int i = 0; i < nvox3D; i++ ) if (imgb[vb+i] <= 0) imga[va+i] = 0; } } else if (op == divX) { if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { //flt x = imga[va+i]; if (imga[va+i] != 0.0f) imga[va+i] = imgb[vb+i]/imga[va+i]; //printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]); } } else { for (int i = 0; i < nvox3D; i++ ) { //printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]); if (imgb[vb+i] == 0.0f) imga[va+i] = 0.0f; else imga[va+i] = imga[va+i]/imgb[vb+i]; } } } else if (op == mod) { //afni mod function, divide by zero yields 0 (unlike Matlab, see remtest.m) //fractional remainder: if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { //printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) ); if (imga[va+i] != 0.0f) imga[va+i] = fmod(imgb[vb+i], imga[va+i]); else { rem0 = 1; imga[va+i] = 0;//imgb[vb+i]; } } } else { for (int i = 0; i < nvox3D; i++ ) { //printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) ); 
if (imgb[vb+i] != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va+i] = fmod(imga[va+i], imgb[vb+i]); else { rem0 = 1; imga[va+i] = 0; } } } } else if (op == rem) { //fmod _rem //fractional remainder: if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { //printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) ); if (trunc(imga[va+i]) != 0.0f) imga[va+i] = fmod(trunc(imgb[vb+i]), trunc(imga[va+i])); else { rem0 = 1; imga[va+i] = imgb[vb+i]; } } } else { for (int i = 0; i < nvox3D; i++ ) { //printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) ); if (trunc(imgb[vb+i]) != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va+i] = fmod(trunc(imga[va+i]), trunc(imgb[vb+i])); else rem0 = 1; } } } else { fprintf(stderr,"nifti_binary: unsupported operation %d\n", op); nifti_image_free( nim2 ); return 1; } } if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D nim->nvox = nim2->nvox; nim->ndim = nim2->ndim; nim->nt =nim2->nt; nim->nu =nim2->nu; nim->nv =nim2->nv; nim->nw =nim2->nw; for (int i = 4; i < 8; i++ ) { nim->dim[i] =nim2->dim[i]; nim->pixdim[i] =nim2->pixdim[i]; } nim->dt =nim2->dt; nim->du =nim2->du; nim->dv =nim2->dv; nim->dw =nim2->dw; free(nim->data); nim->data = nim2->data; nim2->data = NULL; } nifti_image_free( nim2 ); if (rem0) { fprintf(stderr,"Warning -rem image included zeros (fslmaths exception)\n"); return 0; } return 0; } // nifti_binary() struct sortIdx { flt val; int idx; }; static int nifti_roc( nifti_image * nim, double fpThresh, const char * foutfile, const char * fnoise, const char * ftruth) { if (nim->datatype != DT_CALC) return 1; //(nim, thresh, argv[outfile], fnoise, argv[truth]); //fslmaths appears to ignore voxels on edge of image, and will crash with small images: // error: sort(): given object has non-finite elements //therefore, there is a 
margin ("border") around the volume int border = 5; //in voxels int mindim = border + border + 1; //e.g. minimum size has one voxel surrounded by border on each side if ((nim->nx < mindim) || (nim->ny < mindim) || (nim->nz < mindim)) { fprintf(stderr,"volume too small for ROC analyses\n"); return 1; } if (nim->nvox > (nim->nx * nim->ny * nim->nz)) { fprintf(stderr,"ROC input should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } if ((fpThresh <= 0.0) || (fpThresh >= 1.0)) { fprintf(stderr,"ROC false-positive threshold should be between 0 and 1, not '%g'\n", fpThresh); return 1; } nifti_image * nimTrue = nifti_image_read2(ftruth, 1); if( !nimTrue ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", ftruth); exit(2); } if ((nim->nx != nimTrue->nx) || (nim->ny != nimTrue->ny) || (nim->nz != nimTrue->nz) ) { fprintf(stderr,"** Truth image is the wrong size %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nimTrue->nx,nimTrue->ny,nimTrue->nz); nifti_image_free( nimTrue ); exit(1); } if (nimTrue->nvox > (nimTrue->nx * nimTrue->ny * nimTrue->nz)) { fprintf(stderr,"ROC truth should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } nifti_image * nimNoise = NULL; //count number of tests //If the truth image contains negative voxels these get excluded from all calculations int nTest = 0; int nTrue = 0; size_t i = 0; flt * imgTrue = (flt *) nimTrue->data; flt * imgObs = (flt *) nim->data; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) { nTest++; if (imgTrue[i] > 0) nTrue++; } i++; } if (nTest < 1) { fprintf(stderr,"** All truth voxels inside border are negative\n"); exit(1); } //printf("%d %d = %d\n", nTrue, nFalse, nTest); if (nTest == nTrue) fprintf(stderr,"Warning: All truth voxels 
inside border are the same (all true or all false)\n"); struct sortIdx * k = (struct sortIdx *)_mm_malloc(nTest*sizeof(struct sortIdx), 64); //load the data nTest = 0; i = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) { k[nTest].val = imgObs[i]; k[nTest].idx = imgTrue[i] > 0; nTest++; } i++; } qsort(k, nTest, sizeof(struct sortIdx), compare); //for (int v = 0; v < nvol; v++ ) // f32[ k[v].idx ] = v + 1; //printf("%d tests, intensity range %g..%g\n", nTest, k[0].val, k[nTest-1].val); FILE* txt = fopen(foutfile, "w+"); flt threshold = k[nTest-1].val; //maximum observed intensity int bins = 1000; //step size: how often are results reported flt step = (threshold-k[0].val)/bins; //[max-min]/bins int fp = 0; int tp = 0; if (fnoise != NULL) { nimNoise = nifti_image_read2(fnoise, 1); if ((nim->nx != nimNoise->nx) || (nim->ny != nimNoise->ny) || (nim->nz != nimNoise->nz) ) { fprintf(stderr,"** Noise image is the wrong size %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nimNoise->nx,nimNoise->ny,nimNoise->nz); nifti_image_free( nimTrue ); nifti_image_free( nimNoise ); exit(1); } //Matlab script roc.m generates samples you can process with fslmaths.\ // The fslmaths text file includes two additional columns of output not described by the help documentation // Appears to find maximum signal in each noise volume, regardless of whether it is a hit or false alarm. 
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nimNoise->nvox / nvox3D; if (nvol < 10) fprintf(stderr,"Warning: Noise images should include many volumes for estimating familywise error/\n"); flt * imgNoise = (flt *) nimNoise->data; flt * mxVox = (flt *)_mm_malloc(nvol*sizeof(flt), 64); for (int v = 0; v < nvol; v++ ) { //for each volume mxVox[v] = -INFINITY; size_t vo = v * nvox3D; size_t vi = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[vi] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) mxVox[v] = MAX(mxVox[v], imgNoise[vo+vi]); vi++; } } //for each volume nifti_image_free( nimNoise ); qsort (mxVox, nvol, sizeof(flt), compare); int idx = nTest - 1; flt mxNoise = mxVox[nvol-1]; while ((idx >= 1) && (k[idx].val > mxNoise)) { tp ++; idx --; if ((k[idx].val != k[idx-1].val) && (k[idx].val <= threshold) ) { fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } } //more significant than any noise... 
int fpThreshInt = round(fpThresh * nvol); //stop when number of false positives exceed this for (int i = nvol-1; i >= 1; i--) { fp ++; //false alarm while ((idx >= 1) && (k[idx].val >= mxVox[i])) { tp ++; idx --; if ((k[idx].val != k[idx-1].val) && (k[idx].val <= threshold) ) { fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } } //at least as significant as current noise if ((fp > fpThreshInt) || ((k[i].val != k[i-1].val) && (k[i].val <= threshold) ) ) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... _mm_free (mxVox); exit(1); } else { //if noise image else infer FP/TP from input image int nFalse = nTest - nTrue; int fpThreshInt = ceil(fpThresh * nFalse); //stop when number of false positives exceed this for (int i = nTest-1; i >= 1; i--) { if (k[i].idx == 0) fp ++; //false alarm else tp ++; //hit if ((fp > fpThreshInt) || ((k[i].val != k[i-1].val) && (k[i].val <= threshold) ) ) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... } //if noise else... 
fclose(txt); _mm_free (k); nifti_image_free( nimTrue ); return 0; } static int nifti_fillh (nifti_image * nim, int is26) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; //size_t nxy = nim->nx * nim->ny; //slice increment uint8_t * vx = (uint8_t *)_mm_malloc(nim->nvox*sizeof(uint8_t), 64); memset(vx, 0, nim->nvox*sizeof(uint8_t)); size_t n1 = 0; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) if (f32[i] > 0.0) { n1++; vx[i] = 1; } if ((n1 < 1) || (nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) { //if fewer than 3 rows, columns or slices all voxels touch edge. //only a binary threshold, not a flood fill for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = vx[i]; _mm_free (vx); return 1; } //set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap int numk = 6; if (is26) numk = 26; int32_t * k = (int32_t *)_mm_malloc(numk*sizeof(int32_t), 64); //queue with untested seed if (is26) { int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); j++; } //for x } else { //if 26 neighbors else 6.. k[0] = nim->nx * nim->ny; //up k[1] = -k[0]; //down k[2] = nim->nx; //anterior k[3] = -k[2]; //posterior k[4] = 1; //left k[5] = -1; } //https://en.wikipedia.org/wiki/Flood_fill #pragma omp parallel for for (int v = 0; v < nvol; v++ ) { uint8_t * vxv = vx; vxv += (v * nvox3D); uint8_t * vxs = (uint8_t *)_mm_malloc(nim->nvox*sizeof(uint8_t), 64); memcpy(vxs, vxv, nvox3D*sizeof(uint8_t)); //dst, src int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed int qlo = 0; int qhi = -1; //ints always signed in C! 
//load edges size_t i = 0; for (int z = 0; z < nim->nz; z++ ) { int zedge = 0; if ((z == 0) || (z == (nim->nz-1))) zedge = 1; for (int y = 0; y < nim->ny; y++ ) { int yedge = 0; if ((y == 0) || (y == (nim->ny-1))) yedge = 1; for (int x = 0; x < nim->nx; x++ ) { if ((vxs[i] == 0) && (zedge || yedge || (x == 0) || (x == (nim->nx-1))) ) { //found new seed vxs[i] = 1; //do not find again qhi++; q[qhi] = i; } // new seed i++; } //for x }//y } //z //printf("seeds %d kernel %d\n", qhi+1, numk); //run a 'first in, first out' queue while (qhi >= qlo) { //retire one seed, add 0..6 new ones (fillh) or 0..26 new ones (fillh26) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; if (vxs[jj] != 0) continue; //add new seed; vxs[jj] = 1; qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested for (size_t i = 0; i < nvox3D; i++ ) if (vxs[i] == 0) vxv[i] = 1; //hidden internal voxel not found from the fill _mm_free (vxs); _mm_free (q); } //for each volume for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = vx[i]; _mm_free (vx); _mm_free (k); return 0; } static void rand_test() { //https://www.phoronix.com/scan.php?page=news_item&px=Linux-RdRand-Sanity-Check int r0 = rand(); for (int i = 0; i < 7; i++ ) if (rand() != r0) return; fprintf(stderr,"RDRAND gives funky output: update firmware\n"); } static int nifti_unary ( nifti_image * nim, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_unary: Unsupported datatype %d\n", nim->datatype); return 1; } flt * f32 = (flt *) nim->data; if (op == exp1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = exp(f32[i]); } else if (op == log1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = log(f32[i]); } else if (op == sin1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = sin(f32[i]); } else if (op == cos1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = cos(f32[i]); } else if (op == tan1) { for (size_t i = 0; i < 
nim->nvox; i++ ) f32[i] = tan(f32[i]); } else if (op == asin1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = asin(f32[i]); } else if (op == acos1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = acos(f32[i]); } else if (op == atan1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = atan(f32[i]); } else if (op == sqr1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = f32[i]*f32[i]; //<- pow(a,x) uses flt for x } else if (op == sqrt1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = sqrt(f32[i]); } else if (op == recip1) { for (size_t i = 0; i < nim->nvox; i++ ) { if (f32[i] == 0.0f) continue; f32[i] = 1.0 / f32[i]; } } else if (op == abs1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = fabs(f32[i]); } else if (op == bin1) { for (size_t i = 0; i < nim->nvox; i++ ) { if (f32[i] > 0) f32[i] = 1.0f; else f32[i] = 0.0f; } } else if (op == binv1) { for (size_t i = 0; i < nim->nvox; i++ ) { if (f32[i] > 0) f32[i] = 0.0f; else f32[i] = 1.0f; } } else if (op == edge1) { if ((nim->dx == 0.0) || (nim->dy == 0.0) || (nim->dz == 0.0)) { fprintf(stderr,"edge requires non-zero pixdim1/pixdim2/pixdim3\n"); return 1; } flt xscl = 1.0/(sqr(nim->dx)); flt yscl = 1.0/(sqr(nim->dy)); flt zscl = 1.0/(sqr(nim->dz)); flt xyzscl = 1.0/(2.0 * sqrt(xscl+yscl+zscl)); if (nim->dim[3] < 2) { //no slices 'above' or 'below' for 2D size_t nxy = nim->nx * nim->ny; //slice increment int nvol = nim->nvox / nxy; if ((nvol * nxy) != nim->nvox) return 1; #pragma omp parallel for for (int v = 0; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0) flt * inp = (flt *)_mm_malloc(nxy*sizeof(flt), 64); flt *o32 = (flt *) f32; o32 += v * nxy; memcpy(inp, o32, nxy*sizeof(flt)); //dst, src for (int y = 1; (y < (nim->ny -1)); y++ ) { size_t yo =y * nim->nx; for (int x = 1; (x < (nim->nx -1)); x++ ) { size_t vx = yo + x; flt xv = sqr(inp[vx+1] - inp[vx-1]) * xscl; flt yv = sqr(inp[vx+nim->nx] - inp[vx-nim->nx]) * yscl; o32[vx] = sqrt(xv+yv)*xyzscl; } //x } //y 
_mm_free (inp); }//for v return 1; } //edge for 2D volume(s) int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; size_t nxy = nim->nx * nim->ny; //slice increment #pragma omp parallel for for (int v = 0; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0) flt * inp = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); flt *o32 = (flt *) f32; o32 += v * nvox3D; memcpy(inp, o32, nvox3D*sizeof(flt)); //dst, src for (int z = 1; (z < (nim->nz -1)); z++ ) { size_t zo = z * nxy; for (int y = 1; (y < (nim->ny -1)); y++ ) { size_t yo =y * nim->nx; for (int x = 1; (x < (nim->nx -1)); x++ ) { size_t vx = zo + yo + x; flt xv = sqr(inp[vx+1] - inp[vx-1]) * xscl; flt yv = sqr(inp[vx+nim->nx] - inp[vx-nim->nx]) * yscl; flt zv = sqr(inp[vx+nxy] - inp[vx-nxy]) * zscl; o32[vx] = sqrt(xv+yv+zv)*xyzscl; } //x } //y } //z _mm_free (inp); }//for v return 1; //edge for 3D volume(s) } else if (op == index1) { //nb FSLmaths flips dim[1] depending on determinant size_t idx = 0; if (!neg_determ(nim)) { //flip x size_t nyzt = nim->nvox / nim->nx; if ((nyzt * nim->nx) != nim->nvox) return 1; for (size_t i = 0; i <nyzt; i++ ) { size_t row = i * nim->nx;; int x = nim->nx; while (x > 0) { x--; if (f32[row+x] != 0) f32[row+x] = idx++; } //for each column (x) } //for each row (yzt) } else //don't flip x for (size_t i = 0; i < nim->nvox; i++ ) if (f32[i] != 0) f32[i] = idx++; } else if (op == nan1) { for (size_t i = 0; i < nim->nvox; i++ ) if (isnan(f32[i])) f32[i] = 0.0; } else if (op == nanm1) { for (size_t i = 0; i < nim->nvox; i++ ) if (isnan(f32[i])) f32[i] = 1.0; else f32[i] = 0.0; } else if (op == rand1) { rand_test(); flt scl = (1.0 / RAND_MAX); for (size_t i = 0; i < nim->nvox; i++ ) f32[i] += rand() * scl; } else if (op == randn1) { rand_test(); //https://en.wikipedia.org/wiki/Box–Muller_transform //for SIMD see https://github.com/miloyip/normaldist-benchmark static const flt sigma = 1.0f; 
static const flt mu = 0.0; //static const flt epsilon = FLT_EPSILON; static const flt two_pi = 2.0*3.14159265358979323846; static const flt scl = (1.0 / RAND_MAX); //fill pairs for (size_t i = 0; i < (nim->nvox-1); i += 2 ) { flt u1, u2; do { u1 = rand() * scl; u2 = rand() * scl; } while (u1 <= epsilon); flt su1 = sqrt(-2.0 * log(u1)); flt z0 = su1 * cos(two_pi * u2); flt z1 = su1 * sin(two_pi * u2); f32[i] += z0 * sigma + mu; f32[i+1] += z1 * sigma + mu; } //if odd, fill final voxel if ( nim->nvox %2 != 0 ) { flt u1, u2; do { u1 = rand() * scl; u2 = rand() * scl; } while (u1 <= epsilon); flt z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2); f32[nim->nvox-1] += z0 * sigma + mu; } } else if (op == range1) { flt mn = f32[0]; flt mx = mn; for (size_t i = 0; i < nim->nvox; i++ ) { mn = fmin(f32[i], mn); mx = fmax(f32[i],mx); } nim->cal_min = mn; nim->cal_max = mx; } else if (op == rank1) { int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1 ) { //you are always first if you are the only one to show up... for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = 1; } else { #pragma omp parallel for for (int i = 0; i < nvox3D; i++ ) { //how do we handle ties? 
struct sortIdx * k = (struct sortIdx *)_mm_malloc(nvol*sizeof(struct sortIdx), 64); size_t j = i; for (int v = 0; v < nvol; v++ ) { k[v].val = f32[j]; k[v].idx = j; j += nvox3D; } int varies = 0; for (int v = 0; v < nvol; v++ ) { if (k[v].val != k[0].val) { varies = 1; break; } } if (varies) { qsort (k, nvol, sizeof(struct sortIdx), compare); for (int v = 0; v < nvol; v++ ) f32[ k[v].idx ] = v + 1; } else { j = i; for (int v = 0; v < nvol; v++ ) { f32[j] = v + 1; j += nvox3D; } } _mm_free (k); } //for i } //nvol > 1 } else if ((op == rank1) || (op == ranknorm1)) { int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1 ) { //you are always first if you are the only one to show up... for (int i = 0; i < nim->nvox; i++ ) f32[i] = 0; } else { #pragma omp parallel for for (int i = 0; i < nvox3D; i++ ) { struct sortIdx * k = (struct sortIdx *)_mm_malloc(nvol*sizeof(struct sortIdx), 64); size_t j = i; double sum = 0.0; for (int v = 0; v < nvol; v++ ) { k[v].val = f32[j]; sum += k[v].val; k[v].idx = j; j += nvox3D; } double mean = sum / nvol; double sumSqr = 0.0; for (int v = 0; v < nvol; v++ ) sumSqr += sqr(k[v].val- mean); double stdev = sqrt(sumSqr / (nvol - 1)); qsort (k, nvol, sizeof(struct sortIdx), compare); //strange formula, but replicates fslmaths, consider nvol=3 rank[2,0,1] will be pval [2.5/3, 1.5/3, 0.5/3] for (int v = 0; v < nvol; v++ ) f32[ k[v].idx ] = (stdev * -qginv((double)(v + 0.5)/(double)nvol)) + mean; _mm_free (k); } //for i } //nvol > 1 //double qginv( double p ) } else if (op == ztop1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = qg(f32[i]); } else if (op == ptoz1) { /*** given p, return x such that Q(x)=p, for 0 < p < 1 ***/ // #ifdef DT32 const flt kNaN = NAN; //const flt kNaN = 0.0 / 0.0; for (size_t i = 0; i < nim->nvox; i++ ) { if ((f32[i] < 0.0) || (f32[i] > 1.0)) f32[i] = kNaN; else f32[i] = qginv(f32[i]); } } else if ((op == pval1) || (op == 
pval01)) { int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1 ) { fprintf(stderr,"permutation tests require 4D datasets.\n"); return 1; } void * dat = (void *)calloc(1, nvox3D * sizeof(flt)) ; flt *o32 = (flt *) dat; #pragma omp parallel for for (int i = 0; i < nvox3D; i++ ) { size_t vi = i; flt obs = f32[vi]; //observed value - see if it is extreme relative to permutations int nNotZero = 0; int nGreater = 0; int nEqual = 0; //observation in first volume flt f32v0 = f32[vi]; for (int v = 0; v < nvol; v++ ) { if (f32[vi] != 0) nNotZero ++; if (f32[vi] == f32v0) nEqual ++; if (f32[vi] >= obs) nGreater ++; vi += nvox3D; } if (op == pval1) { //if (nEqual == nvol) // o32[i] = 0.0; //else o32[i] = (double)nGreater / (double) nvol ; } else { if (nEqual == nvol) o32[i] = 0.0; else if (obs == 0) o32[i] = 1.0; else //nZero must be at least 1: the observed data is not zero o32[i] = (double)nGreater / (double) (nNotZero) ; } } //for i nim->nvox = nvox3D; nim->ndim = 3; nim->nt = 1; nim->dim[0] = 3; nim->dim[4] = 1; free(nim->data); nim->data = dat; } else if (op == cpval1) { int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1 ) { fprintf(stderr,"permutation tests require 4D datasets.\n"); return 1; } void * dat = (void *)calloc(1, nvox3D * sizeof(flt)) ; flt *o32 = (flt *) dat; flt * vmax = (flt *)_mm_malloc(nvol*sizeof(flt), 64); #pragma omp parallel for for (int v = 1; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0) size_t vo = v * nvox3D; flt mx = f32[vo]; for (int i = 0; i < nvox3D; i++ ) mx = MAX(mx, f32[vo+i]); vmax[v] = mx; //printf("%d %g\n", v, mx); } #pragma omp parallel for for (int i = 0; i < nvox3D; i++ ) { flt obs = f32[i]; //observed value - see if it is extreme relative to permutations int nGreater = 1; //count observation for (int v = 1; v < 
nvol; v++ ) if (vmax[v] >= obs) nGreater ++; o32[i] = (double)nGreater / (double) nvol ; } //for i _mm_free (vmax); nim->nvox = nvox3D; nim->ndim = 3; nim->nt = 1; nim->dim[0] = 3; nim->dim[4] = 1; free(nim->data); nim->data = dat; } else { fprintf(stderr,"nifti_unary: Unsupported operation\n"); return 1; } return 0; }//nifti_unary() static int nifti_thrp(nifti_image * nim, double v, enum eOp op) { // -thrp: use following percentage (0-100) of ROBUST RANGE to threshold current image (zero anything below the number) // -thrP: use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold below // -uthrp : use following percentage (0-100) of ROBUST RANGE to upper-threshold current image (zero anything above the number) // -uthrP : use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold above if ((v <= 0.0) || (v >= 100.0)) { fprintf(stderr,"nifti_thrp: threshold should be between 0..100\n"); return 1; } flt pct2, pct98; int ignoreZeroVoxels = 0; if ((op == thrP) || (op == uthrP)) ignoreZeroVoxels = 1; if (nifti_robust_range(nim, &pct2, &pct98, ignoreZeroVoxels) != 0) return 1; flt thresh = pct2 + ((v/100.0) * (pct98-pct2)); int zeroBrightVoxels = 0; if ((op == uthrp) || (op == uthrP)) zeroBrightVoxels = 1; nifti_thr(nim, thresh, zeroBrightVoxels); return 0; } //nifti_thrp() #ifdef DT32 int main32(int argc, char * argv[]) { #else int main64(int argc, char * argv[]) { printf("beta: Using 64-bit calc\n"); #endif char * fin=NULL, * fout=NULL; //fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths) //fslmsths in.nii -rem 10 out.nii uses integer modulus not fmod //fslmaths robust range not fully described, this emulation is close //fslmaths ing/inm are listed as "unary" but should be listed as binary if( argc < 3 ) return show_helpx(); //minimal command has input and output: "niimath in.nii out.nii" int dtCalc = DT_FLOAT32; //data type for calculation int dtOut = DT_FLOAT32; 
//data type for calculation int ac = 1; // '-dt' sets datatype for calculations if( ! strcmp(argv[ac], "-dt") ) { if (! strcmp(argv[ac+1], "double") ) { dtCalc = DT_FLOAT64; } else if (strcmp(argv[ac+1], "float") ) { fprintf(stderr,"'-dt' error: only float or double calculations supported\n"); return 1; } ac += 2; if( argc < (ac+2) ) return 1; //insufficient arguments remain } //special case: pass through // no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz" // note fslmaths would save as flt type... but lossless conversion in native format is faster // note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max if (ac+2 == argc) { fin = argv[ac]; /* no string copy, just pointer assignment */ ac ++; nifti_image * nim = nifti_image_read(fin, 1); fout = argv[ac]; /* no string copy, just pointer assignment */ ac ++; if (nifti_set_filenames(nim, fout, 0, 1) ) return 1; nifti_save(nim, ""); //nifti_image_write( nim ); nifti_image_free( nim ); return 0; } //end pass through // next argument is input file fin = argv[ac]; /* no string copy, just pointer assignment */ ac ++; //clock_t startTime = clock(); nifti_image * nim = nifti_image_read2(fin, 1); if( !nim ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); return 2; } //printf("read time: %ld ms\n", timediff(startTime, clock())); in_hdr ihdr = set_input_hdr(nim); int nkernel = 0; //number of voxels in kernel int * kernel = make_kernel(nim, &nkernel, 3,3,3); //check for "-odt" must be last couplet if ( ! strcmp(argv[argc-2], "-odt") ) { if (! strcmp(argv[argc-1], "double") ) { dtOut = DT_FLOAT64; } else if (! strcmp(argv[argc-1], "flt") ) { dtOut = DT_FLOAT32; } else if (! strcmp(argv[argc-1], "int") ) { dtOut = DT_INT32; } else if (! strcmp(argv[argc-1], "short") ) { dtOut = DT_INT16; } else if (! strcmp(argv[argc-1], "ushort") ) { dtOut = DT_UINT16; } else if (! strcmp(argv[argc-1], "char") ) { dtOut = DT_UINT8; } else if (! 
strcmp(argv[argc-1], "input") ) { dtOut = nim->datatype;//ihdr.datatype; //! } else { fprintf(stderr,"Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc-1]); return 2; } argc = argc - 2; } //odt //convert data to calculation type (-dt) if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0) return 1; //check output filename, e.g does file exist fout = argv[argc-1]; /* no string copy, just pointer assignment */ if( nifti_set_filenames(nim, fout, 0, 1) ) return 1; argc = argc - 1; #if defined(_OPENMP) const int maxNumThreads = omp_get_max_threads(); const char *key = "AFNI_COMPRESSOR"; char *value; value = getenv(key); //export AFNI_COMPRESSOR=PIGZ char pigzKey[5] = "PIGZ"; if ((value != NULL) && (strstr(value,pigzKey))) { omp_set_num_threads(maxNumThreads); fprintf(stderr,"Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(1); fprintf(stderr,"Single threaded\n"); } #endif //read operations char* end; int ok = 0; while (ac < argc) { enum eOp op = unknown; if ( ! strcmp(argv[ac], "-add") ) op = add; if ( ! strcmp(argv[ac], "-sub") ) op = sub; if ( ! strcmp(argv[ac], "-mul") ) op = mul; if ( ! strcmp(argv[ac], "-div") ) op = divX; if ( ! strcmp(argv[ac], "-rem") ) op = rem; if ( ! strcmp(argv[ac], "-mod") ) op = mod; if ( ! strcmp(argv[ac], "-mas") ) op = mas; if ( ! strcmp(argv[ac], "-thr") ) op = thr; if ( ! strcmp(argv[ac], "-thrp") ) op = thrp; if ( ! strcmp(argv[ac], "-thrP") ) op = thrP; if ( ! strcmp(argv[ac], "-uthr") ) op = uthr; if ( ! strcmp(argv[ac], "-uthrp") ) op = uthrp; if ( ! strcmp(argv[ac], "-uthrP") ) op = uthrP; if ( ! strcmp(argv[ac], "-max") ) op = max; if ( ! strcmp(argv[ac], "-min") ) op = min; if ( ! strcmp(argv[ac], "-max") ) op = max; //if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas //if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas if ( ! strcmp(argv[ac], "power") ) op = power; if ( ! 
strcmp(argv[ac], "-seed") ) op = seed; //if ( ! strcmp(argv[ac], "-restart") ) op = restart; //if ( ! strcmp(argv[ac], "-save") ) op = save; if ( ! strcmp(argv[ac], "-inm") ) op = inm; if ( ! strcmp(argv[ac], "-ing") ) op = ing; if ( ! strcmp(argv[ac], "-s") ) op = smth; if ( ! strcmp(argv[ac], "-exp") ) op = exp1; if ( ! strcmp(argv[ac], "-log") ) op = log1; if ( ! strcmp(argv[ac], "-sin") ) op = sin1; if ( ! strcmp(argv[ac], "-cos") ) op = cos1; if ( ! strcmp(argv[ac], "-tan") ) op = tan1; if ( ! strcmp(argv[ac], "-asin") ) op = asin1; if ( ! strcmp(argv[ac], "-acos") ) op = acos1; if ( ! strcmp(argv[ac], "-atan") ) op = atan1; if ( ! strcmp(argv[ac], "-sqr") ) op = sqr1; if ( ! strcmp(argv[ac], "-sqrt") ) op = sqrt1; if ( ! strcmp(argv[ac], "-recip") ) op = recip1; if ( ! strcmp(argv[ac], "-abs") ) op = abs1; if ( ! strcmp(argv[ac], "-bin") ) op = bin1; if ( ! strcmp(argv[ac], "-binv") ) op = binv1; if ( ! strcmp(argv[ac], "-edge") ) op = edge1; if ( ! strcmp(argv[ac], "-index") ) op = index1; if ( ! strcmp(argv[ac], "-nan") ) op = nan1; if ( ! strcmp(argv[ac], "-nanm") ) op = nanm1; if ( ! strcmp(argv[ac], "-rand") ) op = rand1; if ( ! strcmp(argv[ac], "-randn") ) op = randn1; if ( ! strcmp(argv[ac], "-range") ) op = range1; if ( ! strcmp(argv[ac], "-rank") ) op = rank1; if ( ! strcmp(argv[ac], "-ranknorm") ) op = ranknorm1; if ( ! strcmp(argv[ac], "-ztop") ) op = ztop1; if ( ! strcmp(argv[ac], "-ptoz") ) op = ptoz1; if ( ! strcmp(argv[ac], "-pval") ) op = pval1; if ( ! strcmp(argv[ac], "-pval0") ) op = pval01; if ( ! strcmp(argv[ac], "-cpval") ) op = cpval1; //kernel operations if ( ! strcmp(argv[ac], "-dilM") ) op = dilMk; if ( ! strcmp(argv[ac], "-dilD") ) op = dilDk; if ( ! strcmp(argv[ac], "-dilF") ) op = dilFk; if ( ! strcmp(argv[ac], "-dilall") ) op = dilallk; if ( ! strcmp(argv[ac], "-ero") ) op = erok; if ( ! strcmp(argv[ac], "-eroF") ) op = eroFk; if ( ! strcmp(argv[ac], "-fmedian") ) op = fmediank; if ( ! 
strcmp(argv[ac], "-fmean") ) op = fmeank; if ( ! strcmp(argv[ac], "-fmeanu") ) op = fmeanuk; if ( ! strcmp(argv[ac], "-p") ) { ac++; #if defined(_OPENMP) int nProcessors = atoi(argv[ac]); if (nProcessors < 1) { omp_set_num_threads(maxNumThreads); fprintf(stderr,"Using %d threads\n", maxNumThreads); } else omp_set_num_threads(nProcessors); #else fprintf(stderr,"Warning: not compiled for OpenMP: '-p' ignored\n"); #endif } else //All Dimensionality reduction operations names begin with Capital letter, no other commands do! if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper int dim = 0; switch (argv[ac][1]) { case 'X': // dim = 1; break; case 'Y': // code to be executed if n = 2; dim = 2; break; case 'Z': // dim = 3; break; case 'T': // code to be executed if n = 2; dim = 4; break; } if (dim == 0) { fprintf(stderr,"Error: unknown dimensionality reduction operation: %s\n", argv[ac]); goto fail; } if ( strstr(argv[ac], "mean") ) ok = nifti_dim_reduce(nim, Tmean, dim, 0); else if ( strstr(argv[ac], "std") ) ok = nifti_dim_reduce(nim, Tstd, dim, 0); else if ( strstr(argv[ac], "maxn") ) ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max else if ( strstr(argv[ac], "max") ) ok = nifti_dim_reduce(nim, Tmax, dim, 0); else if ( strstr(argv[ac], "min") ) ok = nifti_dim_reduce(nim, Tmin, dim, 0 ); else if ( strstr(argv[ac], "median") ) ok = nifti_dim_reduce(nim, Tmedian, dim, 0); else if ( strstr(argv[ac], "perc") ) { ac++; int pct = atoi(argv[ac]); ok = nifti_dim_reduce(nim, Tperc, dim, pct); } else if ( strstr(argv[ac], "ar1") ) ok = nifti_dim_reduce(nim, Tar1, dim, 0); else { fprintf(stderr,"Error unknown dimensionality reduction operation: %s\n", argv[ac]); ok = 1; } } else if ( ! 
strcmp(argv[ac], "-roi") ) { //int , int , int , int , int , int , int , int ) if ((argc-ac) < 8) { fprintf(stderr,"not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes goto fail; } ac ++; int xmin = atoi(argv[ac]); ac ++; int xsize = atoi(argv[ac]); ac ++; int ymin = atoi(argv[ac]); ac ++; int ysize = atoi(argv[ac]); ac ++; int zmin = atoi(argv[ac]); ac ++; int zsize = atoi(argv[ac]); ac ++; int tmin = atoi(argv[ac]); ac ++; int tsize = atoi(argv[ac]); nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize); } else if ( ! strcmp(argv[ac], "-bptfm") ) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0); } else if ( ! strcmp(argv[ac], "-bptf") ) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1); #ifdef bandpass } else if ( ! strcmp(argv[ac], "-bandpass") ) { // niimath test4D -bandpass 0.08 0.008 0 c ac++; double lp_hz = strtod(argv[ac], &end); ac++; double hp_hz = strtod(argv[ac], &end); ac++; double TRsec = strtod(argv[ac], &end); ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec); #endif } else if ( ! strcmp(argv[ac], "-roc") ) { //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> ac++; double thresh = strtod(argv[ac], &end); ac++; int outfile = ac; char * fnoise =NULL; if (thresh > 0.0) { ac++; fnoise = argv[ac]; } ac++; int truth = ac; //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]); if (ac >= argc) { fprintf(stderr,"Error: no output filename specified!\n"); //e.g. volume size might differ goto fail; } } else if ( ! 
strcmp(argv[ac], "-unsharp") ) { ac++; double sigma = strtod(argv[ac], &end); ac++; double amount = strtod(argv[ac], &end); nifti_unsharp(nim, sigma, sigma, sigma, amount); } else if ( ! strcmp(argv[ac], "-otsu") ) ok = nifti_otsu(nim, 0); else if ( ! strcmp(argv[ac], "-otsu0") ) ok = nifti_otsu(nim, 1); else if ( ! strcmp(argv[ac], "-subsamp2") ) ok = nifti_subsamp2(nim, 0); else if ( ! strcmp(argv[ac], "-subsamp2offc") ) ok = nifti_subsamp2(nim, 1); else if ( ! strcmp(argv[ac], "-sobel") ) ok = nifti_sobel(nim, 1); else if ( ! strcmp(argv[ac], "-demean") ) ok = nifti_demean(nim); else if ( ! strcmp(argv[ac], "-detrend") ) ok = nifti_detrend_linear(nim); else if ( ! strcmp(argv[ac], "-resize") ) { ac++; double X = strtod(argv[ac], &end); ac++; double Y = strtod(argv[ac], &end); ac++; double Z = strtod(argv[ac], &end); ac ++; int interp_method = atoi(argv[ac]); ok = nifti_resize(nim, X, Y, Z, interp_method); } else if ( ! strcmp(argv[ac], "-crop") ) { ac ++; int tmin = atoi(argv[ac]); ac ++; int tsize = atoi(argv[ac]); ok = nifti_crop(nim, tmin, tsize); } else if ( ! strcmp(argv[ac], "--compare") ) { //--function terminates without saving image ac ++; nifti_compare(nim, argv[ac]); //always terminates } else if ( ! strcmp(argv[ac], "-edt") ) ok = nifti_edt(nim); else if ( ! strcmp(argv[ac], "-fillh") ) ok = nifti_fillh(nim, 0); else if ( ! strcmp(argv[ac], "-fillh26") ) ok = nifti_fillh(nim, 1); else if ( ! strcmp(argv[ac], "-kernel") ) { ac ++; if (kernel != NULL) _mm_free(kernel); kernel = NULL; if ( ! strcmp(argv[ac], "3D") ) kernel = make_kernel(nim, &nkernel, 3,3,3); if ( ! strcmp(argv[ac], "2D") ) kernel = make_kernel(nim, &nkernel, 3,3,1); if ( ! strcmp(argv[ac], "boxv") ) { ac++; int vx = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx,vx,vx); } if ( ! strcmp(argv[ac], "sphere") ) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_sphere(nim, &nkernel, mm); } if ( ! 
strcmp(argv[ac], "file") ) { ac++; kernel = make_kernel_file(nim, &nkernel, argv[ac]); } if ( ! strcmp(argv[ac], "gauss") ) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_gauss(nim, &nkernel, mm); } if ( ! strcmp(argv[ac], "box") ) { //all voxels in a cube of width <size> mm centered on target voxel"); ac++; double mm = strtod(argv[ac], &end); int vx = (2*floor(mm/nim->dx))+1; int vy = (2*floor(mm/nim->dy))+1; int vz = (2*floor(mm/nim->dz))+1; kernel = make_kernel(nim, &nkernel, vx,vy,vz); } if ( ! strcmp(argv[ac], "boxv3") ) { ac++; int vx = atoi(argv[ac]); ac++; int vy = atoi(argv[ac]); ac++; int vz = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx,vy,vz); } if (kernel == NULL){ fprintf(stderr,"Error: '-kernel' option failed.\n"); //e.g. volume size might differ ok = 1; } } else if ( ! strcmp(argv[ac], "-tensor_2lower") ) { ok = nifti_tensor_2(nim, 0); } else if ( ! strcmp(argv[ac], "-tensor_2upper") ) { ok = nifti_tensor_2(nim, 1); } else if ( ! strcmp(argv[ac], "-tensor_decomp") ) { ok = nifti_tensor_decomp(nim,1); } else if ( ! strcmp(argv[ac], "-tensor_decomp_lower") ) { ok = nifti_tensor_decomp(nim,0); } else if ( ! strcmp(argv[ac], "-slicetimer") ) { #ifdef slicetimer ok = nifti_slicetimer(nim); #else fprintf(stderr,"Recompile to support slice timer\n"); //e.g. volume size might differ ok = 1; #endif } else if ( ! strcmp(argv[ac], "-save") ) { ac ++; char * fout2 = argv[ac]; if( nifti_set_filenames(nim, fout2, 1, 1) ) ok = 1; else { nifti_save(nim, ""); //nifti_image_write( nim ); nifti_set_filenames(nim, fout, 1, 1); } } else if ( ! strcmp(argv[ac], "-restart") ) { if (kernel != NULL) fprintf(stderr,"Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ nifti_image_free( nim ); if (kernel != NULL) _mm_free(kernel); kernel = make_kernel(nim, &nkernel, 3,3,3); ac++; nim = nifti_image_read(argv[ac], 1); if( !nim )ok = 1; //error } else if ( ! 
strcmp(argv[ac], "-grid") ) { ac++; double v = strtod(argv[ac], &end); ac++; int s = atoi(argv[ac]); ok = nifti_grid(nim, v, s); } else if ( ! strcmp(argv[ac], "-tfce") ) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ok = nifti_tfce(nim, H, E, c); } else if ( ! strcmp(argv[ac], "-tfceS") ) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ac++; int x = atoi(argv[ac]); ac++; int y = atoi(argv[ac]); ac++; int z = atoi(argv[ac]); ac++; double tfce_thresh = strtod(argv[ac], &end); ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh); } else if (op == unknown) { fprintf(stderr,"!!Error: unsupported operation '%s'\n", argv[ac]); goto fail; } if ((op >= dilMk) && (op <= fmeanuk)) ok = nifti_kernel (nim, op, kernel, nkernel); if ((op >= exp1) && (op <= ptoz1)) nifti_unary(nim, op); if ((op >= add) && (op < exp1)) { //binary operations ac++; double v = strtod(argv[ac], &end); //if (end == argv[ac]) { if (strlen(argv[ac]) != (end - argv[ac])) { // "4d" will return numeric "4" if ((op == power) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed) ) { fprintf(stderr,"Error: '%s' expects numeric value\n", argv[ac-1]); goto fail; } else ok = nifti_binary(nim, argv[ac], op); } else { if (op == add) ok = nifti_rescale(nim , 1.0, v); if (op == sub) ok = nifti_rescale(nim , 1.0, -v); if (op == mul) ok = nifti_rescale(nim , v, 0.0); if (op == divX) ok = nifti_rescale(nim , 1.0/v, 0.0); if (op == mod) ok = nifti_rem(nim, v, 1); if (op == rem) ok = nifti_rem(nim, v, 0); if (op == mas) { fprintf(stderr,"Error: -mas expects image not number\n"); goto fail; } if (op == power) ok = nifti_binary_power(nim, v); if (op == thr) ok = nifti_thr(nim, v, 0); if ((op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP)) ok = nifti_thrp(nim, v, op); if (op == uthr) ok = nifti_thr(nim, v, 1); if (op == max) ok = nifti_max(nim, v, 0); if 
(op == min) ok = nifti_max(nim, v, 1); if (op == inm) ok = nifti_inm(nim, v); if (op == ing) ok = nifti_ing(nim, v); if (op == smth) ok = nifti_smooth_gauss(nim, v, v, v); if (op == seed) { if ((v > 0) && (v < 1)) v *= RAND_MAX; srand((unsigned)fabs(v)); } } } //binary operations if (ok != 0) goto fail; ac ++; } //convert data to output type (-odt) if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0) return 1; /* if we get here, write the output dataset */ //startTime = clock(); nifti_save(nim, ""); //nifti_image_write( nim ); //printf("write time: %ld ms\n", timediff(startTime, clock())); /* and clean up memory */ nifti_image_free( nim ); if (kernel != NULL) _mm_free(kernel); return 0; fail: nifti_image_free( nim ); if (kernel != NULL) _mm_free(kernel); return 1; } //main()
/* ===== distance.c ===== */
/*
 *  R : A Computer Language for Statistical Data Analysis
 *  Copyright (C) 1998-2017 The R Core Team
 *  Copyright (C) 2002-2017 The R Foundation
 *  Copyright (C) 1995, 1996 Robert Gentleman and Ross Ihaka
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, a copy is available at
 *  https://www.R-project.org/Licenses/
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <float.h>
#include <R.h>
#include <Rmath.h>
#include "stats.h"
#ifdef _OPENMP
# include <R_ext/MathThreads.h>
#endif

/* TRUE when both values are finite (excludes NA, NaN and +-Inf). */
#define both_FINITE(a,b) (R_FINITE(a) && R_FINITE(b))
#ifdef R_160_and_older
#define both_non_NA both_FINITE
#else
/* TRUE when neither value is NA/NaN; infinite values are allowed. */
#define both_non_NA(a,b) (!ISNAN(a) && !ISNAN(b))
#endif

/* Euclidean distance between rows i1 and i2 of the nr-by-nc matrix x
   (column-major). Columns with an NA in either row are skipped; if any
   were skipped, the sum of squares is rescaled by nc/count so distances
   remain comparable. Returns NA_REAL when no column is usable. */
static double R_euclidean(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count= 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = (x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		dist += dev * dev;
		count++;
	    }
	}
	i1 += nr; /* step to the next column of each row */
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return sqrt(dist);
}

/* Supremum (maximum/Chebyshev) distance: largest absolute coordinate
   difference over the usable columns; NA_REAL when none is usable. */
static double R_maximum(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count = 0;
    dist = -DBL_MAX; /* any finite deviation exceeds this */
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = fabs(x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		if(dev > dist)
		    dist = dev;
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    return dist;
}

/* Manhattan (L1) distance: sum of absolute differences, with the same
   NA skipping and nc/count rescaling as R_euclidean. */
static double R_manhattan(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count = 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = fabs(x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		dist += dev;
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return dist;
}

/* Canberra distance: sum of |x-y| / (|x|+|y|) over usable columns.
   Columns where both terms are (near) zero are skipped entirely. */
static double R_canberra(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist, sum, diff;
    int count, j;

    count = 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    sum = fabs(x[i1]) + fabs(x[i2]);
	    diff = fabs(x[i1] - x[i2]);
	    if (sum > DBL_MIN || diff > DBL_MIN) {
		dev = diff/sum;
		/* when diff == sum == Inf the ratio is NaN; the comma
		   expression below substitutes the limit value 1. */
		if(!ISNAN(dev) ||
		   (!R_FINITE(diff) && diff == sum &&
		    /* use Inf = lim x -> oo */ (dev = 1., TRUE))) {
		    dist += dev;
		    count++;
		}
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return dist;
}

/* Binary (asymmetric Jaccard-style) distance: among columns where at
   least one value is non-zero, the fraction where exactly one is
   non-zero. Non-finite values are warned about and treated as NA. */
static double R_dist_binary(double *x, int nr, int nc, int i1, int i2)
{
    int total, count, dist;
    int j;

    total = 0;
    count = 0;
    dist = 0;

    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    if(!both_FINITE(x[i1], x[i2])) {
		warning(_("treating non-finite values as NA"));
	    }
	    else {
		if(x[i1] != 0. || x[i2] != 0.) {
		    count++; /* at least one non-zero */
		    if( ! (x[i1] != 0. && x[i2] != 0.) ) dist++; /* exactly one */
		}
		total++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }

    if(total == 0) return NA_REAL;
    if(count == 0) return 0;
    return (double) dist / count;
}

/* Minkowski (Lp) distance for a given p > 0, with the same NA skipping
   and nc/count rescaling as R_euclidean. */
static double R_minkowski(double *x, int nr, int nc, int i1, int i2, double p)
{
    double dev, dist;
    int count, j;

    count= 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = (x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		dist += R_pow(fabs(dev), p);
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return R_pow(dist, 1.0/p);
}

enum { EUCLIDEAN=1, MAXIMUM, MANHATTAN, CANBERRA, BINARY, MINKOWSKI };
/* == 1,2,..., defined by order in the R function dist */

/* Fill d with the lower triangle (column-packed, optionally including
   the diagonal when *diag != 0) of the pairwise distance matrix of the
   rows of x (an *nr-by-*nc column-major matrix), using the metric
   selected by *method; *p is only used for MINKOWSKI. May run the
   column loop in parallel when OpenMP math threads are enabled. */
void R_distance(double *x, int *nr, int *nc, double *d, int *diag,
		int *method, double *p)
{
    int dc, i, j;
    size_t ij;  /* can exceed 2^31 - 1 */
    double (*distfun)(double*, int, int, int, int) = NULL;
#ifdef _OPENMP
    int nthreads;
#endif

    switch(*method) {
    case EUCLIDEAN:
	distfun = R_euclidean;
	break;
    case MAXIMUM:
	distfun = R_maximum;
	break;
    case MANHATTAN:
	distfun = R_manhattan;
	break;
    case CANBERRA:
	distfun = R_canberra;
	break;
    case BINARY:
	distfun = R_dist_binary;
	break;
    case MINKOWSKI:
	if(!R_FINITE(*p) || *p <= 0)
	    error(_("distance(): invalid p"));
	// plus special case below because of extra argument
	break;
    default:
	error(_("distance(): invalid distance"));
    }
    dc = (*diag) ? 0 : 1; /* diag=1:  we do the diagonal */

    /* note: the j == *nr outer iteration is a no-op (inner loop empty) */
#ifdef _OPENMP
    if (R_num_math_threads > 0)
	nthreads = R_num_math_threads;
    else
	nthreads = 1; /* for now */
    if (nthreads == 1) {
	/* do the nthreads == 1 case without any OMP overhead to see
	   if it matters on some platforms */
	ij = 0;
	for(j = 0 ; j <= *nr ; j++)
	    for(i = j+dc ; i < *nr ; i++)
		d[ij++] = (*method != MINKOWSKI) ?
		    distfun(x, *nr, *nc, i, j) :
		    R_minkowski(x, *nr, *nc, i, j, *p);
    }
    else
	/* This produces uneven thread workloads since the outer loop
	   is over the subdiagonal portions of columns.  An
	   alternative would be to use a loop on ij and to compute the
	   i and j values from ij. */
#pragma omp parallel for num_threads(nthreads) default(none) \
    private(i, j, ij) \
    firstprivate(nr, dc, d, method, distfun, nc, x, p)
	for(j = 0 ; j <= *nr ; j++) {
	    /* closed-form start index of column j in the packed triangle */
	    ij = j * (*nr - dc) + j - ((1 + j) * j) / 2;
	    for(i = j+dc ; i < *nr ; i++)
		d[ij++] = (*method != MINKOWSKI) ?
		    distfun(x, *nr, *nc, i, j) :
		    R_minkowski(x, *nr, *nc, i, j, *p);
	}
#else
    ij = 0;
    for(j = 0 ; j <= *nr ; j++)
	for(i = j+dc ; i < *nr ; i++)
	    d[ij++] = (*method != MINKOWSKI) ?
		distfun(x, *nr, *nc, i, j) :
		R_minkowski(x, *nr, *nc, i, j, *p);
#endif
}

#include <Rinternals.h>

/* .Call entry point for stats::dist(): coerces x to a double matrix,
   allocates the packed result vector, computes the distances, and
   copies the attributes supplied in 'attrs' onto the result. */
SEXP Cdist(SEXP x, SEXP smethod, SEXP attrs, SEXP p)
{
    SEXP ans;
    int nr = nrows(x), nc = ncols(x), method = asInteger(smethod);
    int diag = 0;
    R_xlen_t N;
    double rp = asReal(p);
    N = (R_xlen_t)nr * (nr-1)/2; /* avoid int overflow for N ~ 50,000 */
    PROTECT(ans = allocVector(REALSXP, N));
    if(TYPEOF(x) != REALSXP) x = coerceVector(x, REALSXP);
    PROTECT(x);
    R_distance(REAL(x), &nr, &nc, REAL(ans), &diag, &method, &rp);
    /* tack on attributes */
    SEXP names = getAttrib(attrs, R_NamesSymbol);
    for (int i = 0; i < LENGTH(attrs); i++)
	setAttrib(ans, install(translateChar(STRING_ELT(names, i))),
		  VECTOR_ELT(attrs, i));
    UNPROTECT(2);
    return ans;
}
/* ===== density_prior_box_op.h ===== */
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/operators/detection/prior_box_op.h" namespace paddle { namespace operators { template <typename T> class DensityPriorBoxOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto clip = ctx.Attr<bool>("clip"); auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes"); auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios"); auto densities = ctx.Attr<std::vector<int>>("densities"); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(img_width) / feature_width; step_height = static_cast<T>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = 0; 
#ifdef PADDLE_WITH_MKLML #pragma omp parallel for reduction(+ : num_priors) #endif for (size_t i = 0; i < densities.size(); ++i) { num_priors += (fixed_ratios.size()) * (pow(densities[i], 2)); } boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); auto box_dim = vars->dims(); boxes->Resize({feature_height, feature_width, num_priors, 4}); auto e_boxes = framework::EigenTensor<T, 4>::From(*boxes).setConstant(0.0); int step_average = static_cast<int>((step_width + step_height) * 0.5); std::vector<float> sqrt_fixed_ratios; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (size_t i = 0; i < fixed_ratios.size(); i++) { sqrt_fixed_ratios.push_back(sqrt(fixed_ratios[i])); } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int h = 0; h < feature_height; ++h) { for (int w = 0; w < feature_width; ++w) { T center_x = (w + offset) * step_width; T center_y = (h + offset) * step_height; int idx = 0; // Generate density prior boxes with fixed sizes. for (size_t s = 0; s < fixed_sizes.size(); ++s) { auto fixed_size = fixed_sizes[s]; int density = densities[s]; int shift = step_average / density; // Generate density prior boxes with fixed ratios. for (size_t r = 0; r < fixed_ratios.size(); ++r) { float box_width_ratio = fixed_size * sqrt_fixed_ratios[r]; float box_height_ratio = fixed_size / sqrt_fixed_ratios[r]; float density_center_x = center_x - step_average / 2. + shift / 2.; float density_center_y = center_y - step_average / 2. + shift / 2.; for (int di = 0; di < density; ++di) { for (int dj = 0; dj < density; ++dj) { float center_x_temp = density_center_x + dj * shift; float center_y_temp = density_center_y + di * shift; e_boxes(h, w, idx, 0) = std::max( (center_x_temp - box_width_ratio / 2.) / img_width, 0.); e_boxes(h, w, idx, 1) = std::max( (center_y_temp - box_height_ratio / 2.) / img_height, 0.); e_boxes(h, w, idx, 2) = std::min( (center_x_temp + box_width_ratio / 2.) 
/ img_width, 1.); e_boxes(h, w, idx, 3) = std::min( (center_y_temp + box_height_ratio / 2.) / img_height, 1.); idx++; } } } } } } if (clip) { T* dt = boxes->data<T>(); std::transform(dt, dt + boxes->numel(), dt, [](T v) -> T { return std::min<T>(std::max<T>(v, 0.), 1.); }); } framework::Tensor var_t; var_t.mutable_data<T>( pten::make_ddim({1, static_cast<int>(variances.size())}), ctx.GetPlace()); auto var_et = framework::EigenTensor<T, 2>::From(var_t); for (size_t i = 0; i < variances.size(); ++i) { var_et(0, i) = variances[i]; } int box_num = feature_height * feature_width * num_priors; auto var_dim = vars->dims(); vars->Resize({box_num, static_cast<int>(variances.size())}); auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int i = 0; i < box_num; ++i) { for (size_t j = 0; j < variances.size(); ++j) { e_vars(i, j) = variances[j]; } } vars->Resize(var_dim); boxes->Resize(box_dim); } }; // namespace operators } // namespace operators } // namespace paddle
/* ===== Parser.h ===== */
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; 
std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. 
SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. 
AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. 
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. 
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. 
/// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. 
SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... 
void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. 
/// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. 
bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier();

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags; // Flags restored when this object is destroyed.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.  Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls; // Declarations the attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none;
  CachedTokens Toks;

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','.  This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped; // True once Pop() has been called explicitly.
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
void Pop() {
  assert(!Popped && "Nested class has already been popped");
  Popped = true;
  P.PopParsingClass(State);
}

~ParsingClassDefinition() {
  // Pop automatically unless Pop() was already called explicitly.
  if (!Popped)
    P.PopParsingClass(State);
}
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;

SourceRange getSourceRange() const LLVM_READONLY;
};

// Late parsing of templated functions: lex the body now, parse it after the
// enclosing entity is complete.
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  // Single-token convenience overload: forward with T1 as both targets.
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool
ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                     CachedTokens &Toks,
                     bool StopAtSemi = true,
                     bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                              const ParsedTemplateInfo &TemplateInfo =
                                  ParsedTemplateInfo(),
                              LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                      SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// Tracks state while parsing an Objective-C @implementation, including the
/// method definitions whose bodies are lexed now and parsed at the end
/// (see LateParsedObjCMethods). Registers itself as P.CurParsedObjCImpl.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords
// recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                      prec::Level MinPrec);

/// Control what ParseCastExpression will parse.
enum CastParseKind {
  AnyCastExpr = 0,
  UnaryExprOnly,
  PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
  tok::TokenKind K = Tok.getKind();
  // '[', '(', '.', '->', '++', '--' all continue a postfix-expression.
  return (K == tok::l_square || K == tok::l_paren ||
          K == tok::period || K == tok::arrow ||
          K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseUniqueStableNameExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                         SmallVectorImpl<SourceLocation> &CommaLocs,
                         llvm::function_ref<void()> ExpressionStarts =
                             llvm::function_ref<void()>());

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ...
// '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS,
                                   bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);

bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false,
                                    bool InUsingDeclaration = false);

//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions

/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};

// [...]
// () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool ParseLambdaIntroducer(LambdaIntroducer &Intro,
                           LambdaIntroducerTentativeParse *Tentative =
                               nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();

/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed,
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr,
    CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
  struct ForRangeInfo;
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          ForRangeInfo *FRI = nullptr);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines
  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C++ Concepts
  ExprResult ParseRequiresExpression();
  void ParseTrailingRequiresClause(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.

  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    // Brace-enclosed initializers get the full treatment; anything else is
    // just an assignment-expression.
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  ExprResult ParseInitializerWithPotentialDesignator(
      llvm::function_ref<void(const Designation &)> CodeCompleteCB);

  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression();  // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult
  ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                 SourceLocation SuperLoc,
                                 ParsedType ReceiverType,
                                 Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used.)
  typedef SmallVector<Stmt*, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used.)
  typedef SmallVector<Expr*, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr,
                                    unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc,
                                 Sema::ConditionKind CK);
  StmtResult ParseIfStatement(SourceLocation
      *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                                 ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior this __if_exists or __if_not_exists block
    /// should follow.
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers than
  /// would be best implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,         // normal context
    DSC_class,          // class context, enables 'friend'
    DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
    DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
    DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,         // top-level/namespace declaration context
    DSC_template_param,    // template parameter context
    DSC_template_type_arg, // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition           // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  // Implemented as an exhaustive switch (no default) so that adding a new
  // DeclSpecContext enumerator produces a -Wswitch warning here.
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which we can perform class template argument
  /// deduction?
  // Same exhaustive-switch convention as isTypeSpecifier above.
  static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    // A colon was seen, so this really is a for-range declaration.
    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(Declarator &D,
                const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless
  /// the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                  DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            Decl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified token
  /// is definitely a type-specifier.  Return false if it isn't part of a type
  /// specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and isn't part of an expression such as a function-style
  /// cast. Return false if it's not a decl-specifier, or we're not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    // OpenMP needs to know a loop is starting before disambiguation begins.
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    // In C the answer is never ambiguous.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks if the current tokens form type-id or expression.
  /// It is similar to isTypeIdInParens but does not suppose that type-id
  /// is in parenthesis.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
    InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
    ForRangeDecl,  ///< Disambiguated as a for-range declaration.
    Error          ///< Can't be any of the above!
  };

  /// Disambiguates between the different kinds of things that can happen
  /// after 'if (' or 'switch ('. This could be one of two different kinds of
  /// declaration (depending on whether there is a ';' later) or an expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                           bool CanBeForRangeDecl);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult {
    True, False, Ambiguous, Error
  };

  /// Based only on the given token kind, determine whether we know that
  /// we're at the start of an expression or a type-specifier-seq (which may
  /// be an expression, in C++).
  ///
  /// This routine does not attempt to resolve any of the trick cases, e.g.,
  /// those involving lookup of identifiers.
  ///
  /// \returns \c TPR_true if this token starts an expression, \c TPR_false if
  /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
  /// tell.
  TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *InvalidAsDeclSpec = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// Determine whether the current token sequence might be
  ///   '<' template-argument-list '>'
  /// rather than a less-than expression.
  TPResult isTemplateArgumentList(unsigned TokensToSkip);

  /// Determine whether an '(' after an 'explicit' keyword is part of a C++20
  /// 'explicit(bool)' declaration, in earlier language modes where that is an
  /// extension.
  TPResult isExplicitBool();

  /// Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);

  // "Tentative parsing" functions, used for disambiguation. If a parsing error
  // is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling them.
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

public:
  TypeResult ParseTypeName(SourceRange *Range = nullptr,
                           DeclaratorContext Context
                             = DeclaratorContext::TypeNameContext,
                           AccessSpecifier AS = AS_none,
                           Decl **OwnedType = nullptr,
                           ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    // Only '[[' can begin a C++11 attribute-specifier.
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();

  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    // '[[' or 'alignas' would start a misplaced C++11 attribute here.
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear on certain syntactic locations
  // which standard permits but we don't supported yet, for example, attributes
  // appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax);

  void MaybeParseGNUAttributes(Declarator &D,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributes attrs(AttrFactory);
      SourceLocation endLoc;
      ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
      D.takeAttributes(attrs, endLoc);
    }
  }
  void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute))
      ParseGNUAttributes(attrs, endLoc, LateAttrs);
  }
  void ParseGNUAttributes(ParsedAttributes &attrs,
                          SourceLocation *endLoc = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr);
  void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                             ParsedAttr::Syntax Syntax, Declarator *D);
  IdentifierLoc *ParseIdentifierLoc();

  unsigned
  ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                          ParsedAttributes &Attrs, SourceLocation *EndLoc,
                          IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                          ParsedAttr::Syntax Syntax);

  void MaybeParseCXX11Attributes(Declarator &D) {
    if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
      ParsedAttributesWithRange attrs(AttrFactory);
      SourceLocation endLoc;
      ParseCXX11Attributes(attrs, &endLoc);
      D.takeAttributes(attrs, endLoc);
    }
  }
  void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                                 SourceLocation *endLoc = nullptr) {
    if (standardAttributesAllowed()
        && isCXX11AttributeSpecifier()) {
      ParsedAttributesWithRange attrsWithRange(AttrFactory);
      ParseCXX11Attributes(attrsWithRange, endLoc);
      attrs.takeAllFrom(attrsWithRange);
    }
  }
  void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                                 SourceLocation *endLoc = nullptr,
                                 bool OuterMightBeMessageSend = false) {
    if (standardAttributesAllowed() &&
        isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
      ParseCXX11Attributes(attrs, endLoc);
  }

  void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                    SourceLocation *EndLoc = nullptr);
  void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                            SourceLocation *EndLoc = nullptr);
  /// Parses a C++11 (or C2x)-style attribute argument list. Returns true
  /// if this results in adding an attribute to the ParsedAttributes list.
  bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs, SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc);

  IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

  void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                     SourceLocation *endLoc = nullptr) {
    if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
      ParseMicrosoftAttributes(attrs, endLoc);
  }
  void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
  void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                                SourceLocation *endLoc = nullptr);
  void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                    SourceLocation *End = nullptr) {
    const auto &LO = getLangOpts();
    if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
      ParseMicrosoftDeclSpecs(Attrs, End);
  }
  void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                               SourceLocation *End = nullptr);
  bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs);
  void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
  void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
  SourceLocation SkipExtendedMicrosoftTypeAttributes();
  void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
  void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
  void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
  void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
  /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
  /// or higher.
  /// \return false if error happens.
  bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
    if (getLangOpts().OpenCL)
      return ParseOpenCLUnrollHintAttribute(Attrs);
    return true;
  }
  /// Parses opencl_unroll_hint attribute.
  /// \return false if error happens.
  bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

  /// Parses intelfpga:: and clang:: loop attributes if the language is SYCL
  bool MaybeParseSYCLLoopAttributes(ParsedAttributes &Attrs) {
    if (getLangOpts().SYCLIsDevice || getLangOpts().SYCLIsHost)
      return ParseSYCLLoopAttributes(Attrs);
    return true;
  }
  bool ParseSYCLLoopAttributes(ParsedAttributes &Attrs);

  void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

  VersionTuple ParseVersionTuple(SourceRange &Range);
  void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                  SourceLocation AvailabilityLoc,
                                  ParsedAttributes &attrs,
                                  SourceLocation *endLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  Optional<AvailabilitySpec> ParseAvailabilitySpec();
  ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

  void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                          SourceLocation Loc,
                                          ParsedAttributes &Attrs,
                                          SourceLocation *EndLoc,
                                          IdentifierInfo *ScopeName,
                                          SourceLocation ScopeLoc,
                                          ParsedAttr::Syntax Syntax);

  void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                       SourceLocation ObjCBridgeRelatedLoc,
                                       ParsedAttributes &attrs,
                                       SourceLocation *endLoc,
                                       IdentifierInfo *ScopeName,
                                       SourceLocation ScopeLoc,
                                       ParsedAttr::Syntax Syntax);

  void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                        SourceLocation AttrNameLoc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

  void
  ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                            SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                            SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                            SourceLocation ScopeLoc,
                            ParsedAttr::Syntax Syntax);

  void ParseTypeofSpecifier(DeclSpec &DS);
  SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
  void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
  void ParseAtomicSpecifier(DeclSpec &DS);

  ExprResult ParseAlignArgument(SourceLocation Start,
                                SourceLocation &EllipsisLoc);
  void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                               SourceLocation *endLoc = nullptr);

  VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
  VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
    return isCXX11VirtSpecifier(Tok);
  }
  void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                          SourceLocation FriendLoc);

  bool isCXX11FinalKeyword() const;

  /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
  /// enter a new C++ declarator scope and exit it when the function is
  /// finished.
  class DeclaratorScopeObj {
    Parser &P;
    CXXScopeSpec &SS;
    bool EnteredScope;
    bool CreatedScope;
  public:
    DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

    void EnterDeclaratorScope() {
      assert(!EnteredScope && "Already entered the scope!");
      assert(SS.isSet() && "C++ scope was not set!");

      CreatedScope = true;
      P.EnterScope(0); // Not a decl scope.

      if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
        EnteredScope = true;
    }

    ~DeclaratorScopeObj() {
      if (EnteredScope) {
        assert(SS.isSet() && "C++ scope was cleared ?");
        P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
      }
      if (CreatedScope)
        P.ExitScope();
    }
  };

  /// ParseDeclarator - Parse and verify a newly-initialized declarator.
  void ParseDeclarator(Declarator &D);
  /// A function that parses a variant of direct-declarator.
  typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
  void ParseDeclaratorInternal(Declarator &D,
                               DirectDeclParseFunction DirectDeclParser);

  enum AttrRequirements {
    AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
    AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
    AR_GNUAttributesParsed = 1 << 1,
    AR_CXX11AttributesParsed = 1 << 2,
    AR_DeclspecAttributesParsed = 1 << 3,
    AR_AllAttributesParsed = AR_GNUAttributesParsed |
                             AR_CXX11AttributesParsed |
                             AR_DeclspecAttributesParsed,
    AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                                AR_DeclspecAttributesParsed
  };

  void ParseTypeQualifierListOpt(
      DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
      bool AtomicAllowed = true, bool IdentifierRequired = false,
      Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
  void ParseDirectDeclarator(Declarator &D);
  void ParseDecompositionDeclarator(Declarator &D);
  void ParseParenDeclarator(Declarator &D);
  void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                               BalancedDelimiterTracker &Tracker,
                               bool IsAmbiguous, bool RequiresArg = false);
  void InitCXXThisScopeForDeclaratorIfRelevant(
      const Declarator &D, const DeclSpec &DS,
      llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
  bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                         SourceLocation &RefQualifierLoc);
  bool isFunctionDeclaratorIdentifierList();
  void ParseFunctionDeclaratorIdentifierList(
         Declarator &D,
         SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
  void ParseParameterDeclarationClause(
      DeclaratorContext
DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); 
Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( 
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitInfo::OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitInfo::OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. 
void parseOMPContextProperty(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitInfo::OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. 
/// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLastLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool 
ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); 
//===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
/* ===== file: GB_binop__le_uint32.c ===== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel file for the LE operator on uint32
// inputs (boolean output); structure mirrors the other GB_binop__* files.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__le_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__le_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__le_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__le_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__le_uint32)
// A*D function (colscale):         GB (_AxD__le_uint32)
// D*A function (rowscale):         GB (_DxB__le_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__le_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__le_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__le_uint32)
// C=scalar+B                       GB (_bind1st__le_uint32)
// C=scalar+B'                      GB (_bind1st_tran__le_uint32)
// C=A+scalar                       GB (_bind2nd__le_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__le_uint32)

// C type:   bool
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_UINT32 || GxB_NO_LE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__le_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}

GrB_Info GB (_bind1st_tran__le_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}

GrB_Info GB (_bind2nd_tran__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: par_relax.c ===== */
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Relaxation scheme
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "par_relax.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax
 *--------------------------------------------------------------------------*/

/* Apply one relaxation (smoothing) sweep for A u = f, dispatching on
 * relax_type (see the table below).  relax_points selects C/F points where
 * the variant supports it; relax_weight and omega are the inner/outer
 * relaxation parameters; l1_norms feeds the L1 variants; Vtemp and Ztemp
 * are work vectors.  Returns 0 on success, or a nonzero error code
 * propagated from the direct-solve (Gaussian elimination) variants. */
HYPRE_Int
hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
                      hypre_ParVector    *f,
                      HYPRE_Int          *cf_marker,
                      HYPRE_Int           relax_type,
                      HYPRE_Int           relax_points,
                      HYPRE_Real          relax_weight,
                      HYPRE_Real          omega,
                      HYPRE_Real         *l1_norms,
                      hypre_ParVector    *u,
                      hypre_ParVector    *Vtemp,
                      hypre_ParVector    *Ztemp )
{
   HYPRE_Int relax_error = 0;

   /*---------------------------------------------------------------------------------------
    * Switch statement to direct control based on relax_type:
    *     relax_type = 0  -> Jacobi or CF-Jacobi
    *     relax_type = 1  -> Gauss-Seidel <--- very slow, sequential
    *     relax_type = 2  -> Gauss_Seidel: interior points in parallel, boundary sequential
    *     relax_type = 3  -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                        with outer relaxation parameters (forward solve)
    *     relax_type = 4  -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                        with outer relaxation parameters (backward solve)
    *     relax_type = 5  -> hybrid: GS-J mix off-processor, chaotic GS on-node
    *     relax_type = 6  -> hybrid: SSOR-J mix off-processor, SSOR on-processor
    *                        with outer relaxation parameters
    *     relax_type = 7  -> Jacobi (uses Matvec), only needed in CGNR
    *                        [GPU-supported, CF supported with redundant computation]
    *     relax_type = 8  -> hybrid L1 Symm. Gauss-Seidel
    *     relax_type = 9  -> Direct solve, Gaussian elimination
    *     relax_type = 10 -> On-processor direct forward solve for matrices with
    *                        triangular structure (indices need not be ordered triangular)
    *     relax_type = 11 -> Two Stage approximation to GS. Uses the strict lower
    *                        part of the diagonal matrix
    *     relax_type = 12 -> Two Stage approximation to GS. Uses the strict lower
    *                        part of the diagonal matrix and a second iteration
    *                        for additional error approximation
    *     relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
    *     relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
    *     relax_type = 15 -> CG
    *     relax_type = 16 -> Scaled Chebyshev
    *     relax_type = 17 -> FCF-Jacobi
    *     relax_type = 18 -> L1-Jacobi [GPU-supported through call to relax7Jacobi]
    *     relax_type = 19 -> Direct Solve, (old version)
    *     relax_type = 20 -> Kaczmarz
    *     relax_type = 29 -> Direct solve: use gaussian elimination & BLAS
    *                        (with pivoting) (old version)
    *     relax_type = 98 -> Direct solve, Gaussian elimination
    *     relax_type = 99 -> Direct solve, Gaussian elimination
    *     relax_type = 199-> Direct solve, Gaussian elimination
    *-------------------------------------------------------------------------------------*/

   /* NOTE(review): not every relax_type listed above has a case here
    * (e.g. 9, 15, 16, 17, 29, 99, 199); unmatched values fall through
    * and return 0 — presumably handled by the caller; confirm. */
   switch (relax_type)
   {
      case 0: /* Weighted Jacobi */
         hypre_BoomerAMGRelax0WeightedJacobi(A, f, cf_marker, relax_points,
                                             relax_weight, u, Vtemp);
         break;

      case 1: /* Gauss-Seidel VERY SLOW */
         hypre_BoomerAMGRelax1GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */
         hypre_BoomerAMGRelax2GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
      case 3:
         hypre_BoomerAMGRelax3HybridGaussSeidel(A, f, cf_marker, relax_points,
                                                relax_weight, omega, u,
                                                Vtemp, Ztemp);
         break;

      case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */
         hypre_BoomerAMGRelax4HybridGaussSeidel(A, f, cf_marker, relax_points,
                                                relax_weight, omega, u,
                                                Vtemp, Ztemp);
         break;

      case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */
         hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel(A, f, cf_marker,
                                                       relax_points, u);
         break;

      case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/SSOR on-processor
                 with outer relaxation parameter */
         hypre_BoomerAMGRelax6HybridSSOR(A, f, cf_marker, relax_points,
                                         relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 7: /* Jacobi (uses ParMatvec) */
         hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points,
                                     relax_weight, l1_norms, u, Vtemp);
         break;

      case 8: /* hybrid L1 Symm. Gauss-Seidel */
         hypre_BoomerAMGRelax8HybridL1SSOR(A, f, cf_marker, relax_points,
                                           relax_weight, omega, l1_norms, u,
                                           Vtemp, Ztemp);
         break;

      /* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
      case 10:
         hypre_BoomerAMGRelax10TopoOrderedGaussSeidel(A, f, cf_marker,
                                                      relax_points,
                                                      relax_weight, omega, u,
                                                      Vtemp, Ztemp);
         break;

      case 11: /* Two Stage Gauss Seidel. Forward sweep only */
         hypre_BoomerAMGRelax11TwoStageGaussSeidel(A, f, cf_marker,
                                                   relax_points, relax_weight,
                                                   omega, u, Vtemp, Ztemp);
         break;

      case 12: /* Two Stage Gauss Seidel. Uses the diagonal matrix for the GS part */
         hypre_BoomerAMGRelax12TwoStageGaussSeidel(A, f, cf_marker,
                                                   relax_points, relax_weight,
                                                   omega, u, Vtemp, Ztemp);
         break;

      case 13: /* hybrid L1 Gauss-Seidel forward solve */
         hypre_BoomerAMGRelax13HybridL1GaussSeidel(A, f, cf_marker,
                                                   relax_points, relax_weight,
                                                   omega, l1_norms, u,
                                                   Vtemp, Ztemp);
         break;

      case 14: /* hybrid L1 Gauss-Seidel backward solve */
         hypre_BoomerAMGRelax14HybridL1GaussSeidel(A, f, cf_marker,
                                                   relax_points, relax_weight,
                                                   omega, l1_norms, u,
                                                   Vtemp, Ztemp);
         break;

      case 18: /* weighted L1 Jacobi */
         hypre_BoomerAMGRelax18WeightedL1Jacobi(A, f, cf_marker, relax_points,
                                                relax_weight, l1_norms, u,
                                                Vtemp);
         break;

      case 19: /* Direct solve: use gaussian elimination */
         relax_error = hypre_BoomerAMGRelax19GaussElim(A, f, u);
         break;

      case 20: /* Kaczmarz */
         hypre_BoomerAMGRelaxKaczmarz(A, f, omega, l1_norms, u);
         break;

      case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */
         relax_error = hypre_BoomerAMGRelax98GaussElimPivot(A, f, u);
         break;
   }

   return relax_error;
}

/* Shared core for the weighted-Jacobi relaxation variants.
 * NOTE(review): Skip_diag presumably controls whether the diagonal entry is
 * excluded from the sweep — body continues beyond this excerpt; confirm. */
HYPRE_Int
hypre_BoomerAMGRelaxWeightedJacobi_core( hypre_ParCSRMatrix *A,
                                         hypre_ParVector    *f,
                                         HYPRE_Int          *cf_marker,
                                         HYPRE_Int           relax_points,
                                         HYPRE_Real          relax_weight,
                                         HYPRE_Real         *l1_norms,
                                         hypre_ParVector    *u,
                                         hypre_ParVector    *Vtemp,
                                         HYPRE_Int           Skip_diag )
{
   /* Local views of the diagonal and off-diagonal CSR parts of A. */
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex
*u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Real one_minus_weight = 1.0 - relax_weight; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { Vtemp_data[i] = u_data[i]; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { const HYPRE_Complex di = l1_norms ? 
l1_norms[i] : A_diag_data[A_diag_i[i]]; /*----------------------------------------------------------- * If i is of the right type ( C or F or All ) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } if (Skip_diag) { u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / di; } else { u_data[i] += relax_weight * res / di; } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax0WeightedJacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, hypre_ParVector *u, hypre_ParVector *Vtemp ) { return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, NULL, u, Vtemp, 1); } HYPRE_Int hypre_BoomerAMGRelax18WeightedL1Jacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { // XXX GPU calls Relax7 XXX return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); } else #endif { /* in the case of non-CF, use relax-7 which is 
faster */ if (relax_points == 0) { return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); } else { return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp, 0); } } } HYPRE_Int hypre_BoomerAMGRelax1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs + num_sends, HYPRE_MEMORY_HOST); requests 
= hypre_CTAlloc(hypre_MPI_Request, num_recvs + num_sends, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - vec_start; for (j = vec_start; j < vec_start + vec_len; j++) { v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr, requests, status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i + 1) - vec_start; hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr, requests, status); } for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_MPI_Barrier(comm); } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax2GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; 
hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs + num_sends, HYPRE_MEMORY_HOST); requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs + num_sends, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Relax interior points first *-----------------------------------------------------------------*/ for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i + 1] - A_offd_i[i] == zero && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - vec_start; for (j = vec_start; j < vec_start + vec_len; j++) { v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr, requests, status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); 
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i + 1) - vec_start; hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr, requests, status); } for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i + 1] - A_offd_i[i] != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_MPI_Barrier(comm); } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelaxHybridGaussSeidel_core( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp, HYPRE_Int GS_order, HYPRE_Int Symm, HYPRE_Int Skip_diag, HYPRE_Int forced_seq, HYPRE_Int Topo_order ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = 
hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = Vtemp ? hypre_ParVectorLocalVector(Vtemp) : NULL; HYPRE_Complex *Vtemp_data = Vtemp_local ? hypre_VectorData(Vtemp_local) : NULL; /* hypre_Vector *Ztemp_local = NULL; HYPRE_Complex *Ztemp_data = NULL; */ HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Int *proc_ordering = NULL; const HYPRE_Real one_minus_omega = 1.0 - omega; HYPRE_Int num_procs, my_id, num_threads, j, num_sends; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = forced_seq ? 1 : hypre_NumThreads(); /* GS order: forward or backward */ const HYPRE_Int gs_order = GS_order > 0 ? 1 : -1; /* for symmetric GS, a forward followed by a backward */ const HYPRE_Int num_sweeps = Symm ? 
2 : 1; /* if relax_weight and omega are both 1.0 */ const HYPRE_Int non_scale = relax_weight == 1.0 && omega == 1.0; /* */ const HYPRE_Real prod = 1.0 - relax_weight * omega; /* if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } */ #if defined(HYPRE_USING_PERSISTENT_COMM) // JSP: persistent comm can be similarly used for other smoothers hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (num_procs > 1) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); #if defined(HYPRE_USING_PERSISTENT_COMM) persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); v_ext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); #else v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); #endif HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif #if defined(HYPRE_USING_PERSISTENT_COMM) hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data); #else comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); #endif #if defined(HYPRE_USING_PERSISTENT_COMM) 
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, v_ext_data); #else hypre_ParCSRCommHandleDestroy(comm_handle); #endif comm_handle = NULL; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif } if (Topo_order) { /* Check for ordering of matrix. If stored, get pointer, otherwise * compute ordering and point matrix variable to array. * Used in AIR */ if (!hypre_ParCSRMatrixProcOrdering(A)) { proc_ordering = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, num_rows); hypre_ParCSRMatrixProcOrdering(A) = proc_ordering; } else { proc_ordering = hypre_ParCSRMatrixProcOrdering(A); } } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime(); #endif if ( (num_threads > 1 || !non_scale) && Vtemp_data ) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_rows; j++) { Vtemp_data[j] = u_data[j]; } } if (num_threads > 1) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { HYPRE_Int ns, ne, sweep; hypre_partition1D(num_rows, num_threads, j, &ns, &ne); for (sweep = 0; sweep < num_sweeps; sweep++) { const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1; const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1; const HYPRE_Int iend = iorder > 0 ? 
ne : ns - 1; if (non_scale) { hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag); } else { hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega, prod, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag); } } /* for (sweep = 0; sweep < num_sweeps; sweep++) */ } /* for (j = 0; j < num_threads; j++) */ } else /* if (num_threads > 1) */ { HYPRE_Int sweep; for (sweep = 0; sweep < num_sweeps; sweep++) { const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1; const HYPRE_Int ibegin = iorder > 0 ? 0 : num_rows - 1; const HYPRE_Int iend = iorder > 0 ? num_rows : -1; if (Topo_order) { hypre_HybridGaussSeidelOrderedNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, u_data, NULL, v_ext_data, ibegin, iend, iorder, proc_ordering); } else { if (non_scale) { hypre_HybridGaussSeidelNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag); } else { hypre_HybridGaussSeidel(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega, prod, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag); } } } /* for (sweep = 0; sweep < num_sweeps; sweep++) */ } /* if (num_threads > 1) */ #ifndef HYPRE_USING_PERSISTENT_COMM if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /* forward hybrid G-S */ HYPRE_Int 
hypre_BoomerAMGRelax3HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, 1 /* skip diag */, 0, 0); } } /* backward hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax4HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, 
relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nosymm */, 1 /* skip diag */, 0, 0); } } /* chaotic forward G-S */ HYPRE_Int hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /* symmetric hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax6HybridSSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax 
on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */, 1 /* skip diag */, 0, 0); } } HYPRE_Int hypre_BoomerAMGRelax7Jacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp ) { HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector l1_norms_vec; hypre_ParVector l1_norms_parvec; hypre_VectorData(&l1_norms_vec) = l1_norms; hypre_VectorSize(&l1_norms_vec) = num_rows; /* TODO XXX * The next line is NOT 100% correct, which should be the memory location of l1_norms instead of f * But how do I know it? As said, don't use raw pointers, don't use raw pointers! * It is fine normally since A, f, and l1_norms should live in the same memory space */ hypre_VectorMemoryLocation(&l1_norms_vec) = hypre_ParVectorMemoryLocation(f); hypre_ParVectorLocalVector(&l1_norms_parvec) = &l1_norms_vec; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*----------------------------------------------------------------- * Copy f into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParVectorCopy(f, Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp = w * (f - Au) *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, Vtemp); /*----------------------------------------------------------------- * u += D^{-1} * Vtemp, where D_ii = ||A(i,:)||_1 *-----------------------------------------------------------------*/ if (relax_points) { hypre_ParVectorElmdivpyMarked(Vtemp, &l1_norms_parvec, u, cf_marker, relax_points); } else { hypre_ParVectorElmdivpy(Vtemp, &l1_norms_parvec, u); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncComputeStream(hypre_handle()); #endif return hypre_error_flag; } /* symmetric l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax8HybridL1SSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 
0 : 1;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   // TODO implement CF relax on GPUs
   if (relax_points != 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#if defined(HYPRE_USING_GPU)
   /* fall back to the host path when the device G-S method is disabled */
   if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#endif
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, l1_norms, u, Vtemp,
                                                         Ztemp, 1, 1 /* symm */);
   }
   else
#endif
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight,
                                                        omega, l1_norms, u, Vtemp, Ztemp, 1,
                                                        1 /* symm */, skip_diag, 0, 0);
   }
}

/* forward hybrid topology ordered G-S */
HYPRE_Int
hypre_BoomerAMGRelax10TopoOrderedGaussSeidel( hypre_ParCSRMatrix *A,
                                              hypre_ParVector    *f,
                                              HYPRE_Int          *cf_marker,
                                              HYPRE_Int           relax_points,
                                              HYPRE_Real          relax_weight,
                                              HYPRE_Real          omega,
                                              hypre_ParVector    *u,
                                              hypre_ParVector    *Vtemp,
                                              hypre_ParVector    *Ztemp )
{
   /* last two args request topological ordering in the core G-S kernel */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight,
                                                     omega, NULL, u, Vtemp, Ztemp, 1 /* forward */,
                                                     0 /* nonsymm */, 1 /* skip_diag */, 1, 1);
}

/* forward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax13HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           HYPRE_Real         *l1_norms,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* the diagonal scaling can only be skipped for plain (unweighted) G-S */
   const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   // TODO implement CF relax on GPUs
   if (relax_points != 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#if defined(HYPRE_USING_GPU)
   if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#endif
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, l1_norms, u, Vtemp,
                                                         Ztemp, 1, 0 /* nonsymm */);
   }
   else
#endif
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight,
                                                        omega, l1_norms, u, Vtemp, Ztemp,
                                                        1 /* forward */, 0 /* nonsymm */, skip_diag,
                                                        0, 0 );
   }
}

/* backward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax14HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           HYPRE_Real         *l1_norms,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* the diagonal scaling can only be skipped for plain (unweighted) G-S */
   const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   // TODO implement CF relax on GPUs
   if (relax_points != 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#if defined(HYPRE_USING_GPU)
   if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0)
   {
      exec = HYPRE_EXEC_HOST;
   }
#endif
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, l1_norms, u, Vtemp,
                                                         Ztemp, -1, 0 /* nonsymm */);
   }
   else
#endif
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight,
                                                        omega, l1_norms, u, Vtemp, Ztemp,
                                                        -1 /* backward */, 0 /* nonsymm */,
                                                        skip_diag, 0, 0 );
   }
}

/* Direct solve by (unpivoted) Gaussian elimination on the gathered global
 * matrix; intended for small coarsest-grid systems only.  Every rank builds
 * the dense n_global x n_global matrix, so cost/memory scale quadratically
 * with the global size. */
HYPRE_Int
hypre_BoomerAMGRelax19GaussElim( hypre_ParCSRMatrix *A,
                                 hypre_ParVector    *f,
                                 hypre_ParVector    *u )
{
   HYPRE_BigInt     global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt     first_ind       = hypre_ParVectorFirstIndex(u);
   /* NOTE(review): narrowing HYPRE_BigInt -> HYPRE_Int; assumes the global
    * problem is small enough to fit in HYPRE_Int — TODO confirm */
   HYPRE_Int        n_global        = (HYPRE_Int) global_num_rows;
   HYPRE_Int        first_index     = (HYPRE_Int) first_ind;
   HYPRE_Int        num_rows        = hypre_ParCSRMatrixNumRows(A);
   hypre_Vector    *u_local         = hypre_ParVectorLocalVector(u);
   HYPRE_Complex   *u_data          = hypre_VectorData(u_local);
   hypre_CSRMatrix *A_CSR;
   HYPRE_Int       *A_CSR_i;
   HYPRE_Int       *A_CSR_j;
   HYPRE_Real      *A_CSR_data;
   hypre_Vector    *f_vector;
   HYPRE_Real      *f_vector_data;
   HYPRE_Real      *A_mat;
   HYPRE_Real      *b_vec;
   HYPRE_Int        i, jj, column, relax_error = 0;

   /*-----------------------------------------------------------------
    * Generate CSR matrix from ParCSRMatrix A
    *-----------------------------------------------------------------*/

   /* all processors are needed for these routines */
   A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
   f_vector = hypre_ParVectorToVectorAll(f);

   if (num_rows)
   {
      A_CSR_i = hypre_CSRMatrixI(A_CSR);
      A_CSR_j = hypre_CSRMatrixJ(A_CSR);
      A_CSR_data =
hypre_CSRMatrixData(A_CSR);
      f_vector_data = hypre_VectorData(f_vector);
      /* NOTE(review): n_global * n_global is computed in HYPRE_Int and can
       * overflow for large global sizes — confirm the caller bounds this */
      A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST);
      b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);

      /*---------------------------------------------------------------
       * Load CSR matrix into A_mat.
       *---------------------------------------------------------------*/
      for (i = 0; i < n_global; i++)
      {
         /* dense storage is row-major here (hypre_gselim convention) */
         for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++)
         {
            column = A_CSR_j[jj];
            A_mat[i * n_global + column] = A_CSR_data[jj];
         }
         b_vec[i] = f_vector_data[i];
      }

      /* solve the dense system in place; b_vec receives the solution */
      hypre_gselim(A_mat, b_vec, n_global, relax_error);

      /* scatter this rank's slice of the global solution into u */
      for (i = 0; i < num_rows; i++)
      {
         u_data[i] = b_vec[first_index + i];
      }

      hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
      hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixDestroy(A_CSR);
      A_CSR = NULL;
      hypre_SeqVectorDestroy(f_vector);
      f_vector = NULL;
   }
   else
   {
      /* ranks with no local rows still participated in the all-gather and
       * must release the gathered copies */
      hypre_CSRMatrixDestroy(A_CSR);
      A_CSR = NULL;
      hypre_SeqVectorDestroy(f_vector);
      f_vector = NULL;
   }

   return relax_error;
}

/* Direct solve by LU factorization with partial pivoting (LAPACK
 * dgetrf/dgetrs) on the gathered global matrix; more robust than relax 19
 * for matrices that need pivoting.  Small coarsest-grid systems only. */
HYPRE_Int
hypre_BoomerAMGRelax98GaussElimPivot( hypre_ParCSRMatrix *A,
                                      hypre_ParVector    *f,
                                      hypre_ParVector    *u )
{
   HYPRE_BigInt     global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt     first_ind       = hypre_ParVectorFirstIndex(u);
   HYPRE_Int        n_global        = (HYPRE_Int) global_num_rows;
   HYPRE_Int        first_index     = (HYPRE_Int) first_ind;
   HYPRE_Int        num_rows        = hypre_ParCSRMatrixNumRows(A);
   hypre_Vector    *u_local         = hypre_ParVectorLocalVector(u);
   HYPRE_Complex   *u_data          = hypre_VectorData(u_local);
   hypre_CSRMatrix *A_CSR;
   HYPRE_Int       *A_CSR_i;
   HYPRE_Int       *A_CSR_j;
   HYPRE_Real      *A_CSR_data;
   hypre_Vector    *f_vector;
   HYPRE_Real      *f_vector_data;
   HYPRE_Real      *A_mat;
   HYPRE_Real      *b_vec;
   HYPRE_Int        i, jj, column, relax_error = 0;
   HYPRE_Int        info;
   HYPRE_Int        one_i = 1;
   HYPRE_Int       *piv;

   /*-----------------------------------------------------------------
    * Generate CSR matrix from ParCSRMatrix A
    *-----------------------------------------------------------------*/

   /* all processors are needed for these routines */
   A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
   f_vector = hypre_ParVectorToVectorAll(f);

   if (num_rows)
   {
      A_CSR_i = hypre_CSRMatrixI(A_CSR);
      A_CSR_j = hypre_CSRMatrixJ(A_CSR);
      A_CSR_data = hypre_CSRMatrixData(A_CSR);
      f_vector_data = hypre_VectorData(f_vector);
      A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST);
      b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);

      /*---------------------------------------------------------------
       * Load CSR matrix into A_mat.
       *---------------------------------------------------------------*/
      for (i = 0; i < n_global; i++)
      {
         for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++)
         {
            /* need col major */
            column = A_CSR_j[jj];
            A_mat[i + n_global * column] = A_CSR_data[jj];
         }
         b_vec[i] = f_vector_data[i];
      }

      piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST);

      /* write over A with LU */
      hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info);

      /*now b_vec = inv(A)*b_vec */
      hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info);

      hypre_TFree(piv, HYPRE_MEMORY_HOST);

      /* scatter this rank's slice of the global solution into u */
      for (i = 0; i < num_rows; i++)
      {
         u_data[i] = b_vec[first_index + i];
      }

      hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
      hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixDestroy(A_CSR);
      A_CSR = NULL;
      hypre_SeqVectorDestroy(f_vector);
      f_vector = NULL;
   }
   else
   {
      hypre_CSRMatrixDestroy(A_CSR);
      A_CSR = NULL;
      hypre_SeqVectorDestroy(f_vector);
      f_vector = NULL;
   }

   return relax_error;
}

/* Symmetric (forward + backward sweep) Kaczmarz relaxation with l1 row
 * scaling.  Off-processor values of u are fetched once before the sweeps;
 * only local entries of u are updated. */
HYPRE_Int
hypre_BoomerAMGRelaxKaczmarz( hypre_ParCSRMatrix *A,
                              hypre_ParVector    *f,
                              HYPRE_Real          omega,
                              HYPRE_Real         *l1_norms,
                              hypre_ParVector    *u )
{
   MPI_Comm             comm        = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int
*A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Complex *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Complex *f_data = hypre_VectorData(f_local);
   HYPRE_Complex *u_offd_data = NULL;
   HYPRE_Complex *u_buf_data = NULL;
   HYPRE_Complex res;
   HYPRE_Int num_procs, my_id, i, j, index, num_sends, start;
   hypre_ParCSRCommHandle *comm_handle;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Exchange ghost values of u once; the sweeps below treat them as frozen */
   if (num_procs > 1)
   {
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
      u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
   }

   /* Forward local pass */
   for (i = 0; i < num_rows; i++)
   {
      /* residual of row i: res = f_i - A(i,:) * u */
      res = f_data[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         res -= A_diag_data[j] * u_data[A_diag_j[j]];
      }
      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
      }
      res /= l1_norms[i];
      /* Kaczmarz update: project along row i (local part only) */
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
      }
   }

   /* Backward local pass */
   for (i = num_rows - 1; i > -1; i--)
   {
      res = f_data[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         res -= A_diag_data[j] * u_data[A_diag_j[j]];
      }
      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
      }
      res /= l1_norms[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
      }
   }

   hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Host kernel for the two-stage Gauss-Seidel smoother: one Jacobi-like step
 * followed by num_inner_iters Neumann-series corrections using the strict
 * lower triangle of the local diagonal block.
 * NOTE(review): the "omega" parameter is not referenced in this host path —
 * confirm whether that is intentional. */
HYPRE_Int
hypre_BoomerAMGRelaxTwoStageGaussSeidelHost( hypre_ParCSRMatrix *A,
                                             hypre_ParVector    *f,
                                             HYPRE_Real          relax_weight,
                                             HYPRE_Real          omega,
                                             hypre_ParVector    *u,
                                             hypre_ParVector    *Vtemp,
                                             HYPRE_Int           num_inner_iters)
{
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int        num_rows    = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   hypre_Vector    *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Complex   *Vtemp_data  = hypre_VectorData(Vtemp_local);
   hypre_Vector    *u_local     = hypre_ParVectorLocalVector(u);
   HYPRE_Complex   *u_data      = hypre_VectorData(u_local);
   HYPRE_Int        i, k, jj, ii;
   HYPRE_Complex    multiplier = 1.0;

   /* Need to check that EVERY diagonal is nonzero first.
      If any are, throw exception
      NOTE(review): hypre_error_in_arg only flags the error; execution falls
      through to the division by A_diag_data[A_diag_i[i]] below, which would
      divide by zero — confirm whether an early return is intended. */
   for (i = 0; i < num_rows; i++)
   {
      if (A_diag_data[A_diag_i[i]] == 0.0)
      {
         hypre_error_in_arg(1);
      }
   }

   /* Vtemp = w * (f - A*u) */
   hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp);

   for (i = 0; i < num_rows; i++) /* Run the smoother */
   {
      // V = V/D
      Vtemp_data[i] /= A_diag_data[A_diag_i[i]];
      // u = u + m*v
      u_data[i] += multiplier * Vtemp_data[i];
   }

   // adjust for the alternating series
   multiplier *= -1.0;

   for (k = 0; k < num_inner_iters; ++k)
   {
      // By going from bottom to top, we can update Vtemp in place because
      // we're operating with the strict, lower triangular matrix
      for (i = num_rows - 1; i >= 0; i--) /* Run the smoother */
      {
         // spmv for the row first
         HYPRE_Complex res = 0.0;
         for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (ii < i)
            {
               res += A_diag_data[jj] * Vtemp_data[ii];
            }
         }
         // diagonal scaling has to come after the spmv accumulation. It's a row scaling
         // not column
         Vtemp_data[i] = res / A_diag_data[A_diag_i[i]];
         u_data[i] += multiplier * Vtemp_data[i];
      }
      // adjust for the alternating series
      multiplier *= -1.0;
   }

   return hypre_error_flag;
}

/* Two-stage G-S, one inner iteration.  cf_marker/relax_points/Ztemp are
 * accepted for interface uniformity; Ztemp is used only on the device path. */
HYPRE_Int
hypre_BoomerAMGRelax11TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 1);
   }
   else
#endif
   {
      hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 1);
   }

   return hypre_error_flag;
}

/* Two-stage G-S, two inner iterations (otherwise identical to relax 11). */
HYPRE_Int
hypre_BoomerAMGRelax12TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 2);
   }
   else
#endif
   {
      hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 2);
   }

   return hypre_error_flag;
}
CPULauncher.h
// ----------------------------------------------------------------------------
// -                        Open3D: www.open3d.org                            -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------

#pragma once

#include <vector>

#include "open3d/core/AdvancedIndexing.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/ParallelUtil.h"
#include "open3d/utility/Logging.h"

namespace open3d {
namespace core {
namespace kernel {
namespace cpu_launcher {

/// \brief Run a function in parallel on CPU.
///
/// This is typically used together with cuda_launcher::ParallelFor() to
/// share the same code between CPU and CUDA. For example:
///
/// ```cpp
/// #if defined(__CUDACC__)
/// namespace launcher = core::kernel::cuda_launcher;
/// #else
/// namespace launcher = core::kernel::cpu_launcher;
/// #endif
///
/// launcher::ParallelFor(num_workloads, [=] OPEN3D_DEVICE(int64_t idx) {
///     process_workload(idx);
/// });
/// ```
///
/// \param n The number of workloads.
/// \param func The function to be executed in parallel. The function should
/// take an int64_t workload index and returns void, i.e., `void func(int64_t)`.
///
/// \note This is optimized for uniform work items, i.e. where each call to \p
/// func takes the same time.
/// \note If you use a lambda function, capture only the required variables
/// instead of all to prevent accidental race conditions. If you want the kernel
/// to be used on both CPU and CUDA, capture the variables by value.
template <typename func_t>
void ParallelFor(int64_t n, const func_t& func) {
    // Static schedule: workloads are assumed uniform (see note above).
#pragma omp parallel for schedule(static)
    for (int64_t i = 0; i < n; ++i) {
        func(i);
    }
}

/// Fills tensor[:][i] with func(i).
///
/// \param indexer The input tensor and output tensor of the indexer are the
/// same (as a hack), since the tensor is filled in-place.
/// \param func A function that takes a pointer location and the
/// workload index i, computes the value to fill, and fills the value at the
/// pointer location.
template <typename func_t> void LaunchIndexFillKernel(const Indexer& indexer, const func_t& func) { #pragma omp parallel for schedule(static) for (int64_t i = 0; i < indexer.NumWorkloads(); ++i) { func(indexer.GetInputPtr(0, i), i); } } template <typename func_t> void LaunchUnaryEWKernel(const Indexer& indexer, const func_t& func) { #pragma omp parallel for schedule(static) for (int64_t i = 0; i < indexer.NumWorkloads(); ++i) { func(indexer.GetInputPtr(0, i), indexer.GetOutputPtr(i)); } } template <typename func_t> void LaunchBinaryEWKernel(const Indexer& indexer, const func_t& func) { #pragma omp parallel for schedule(static) for (int64_t i = 0; i < indexer.NumWorkloads(); ++i) { func(indexer.GetInputPtr(0, i), indexer.GetInputPtr(1, i), indexer.GetOutputPtr(i)); } } template <typename func_t> void LaunchAdvancedIndexerKernel(const AdvancedIndexer& indexer, const func_t& func) { #pragma omp parallel for schedule(static) for (int64_t i = 0; i < indexer.NumWorkloads(); ++i) { func(indexer.GetInputPtr(i), indexer.GetOutputPtr(i)); } } template <typename scalar_t, typename func_t> void LaunchReductionKernelSerial(const Indexer& indexer, const func_t& func) { for (int64_t i = 0; i < indexer.NumWorkloads(); ++i) { func(indexer.GetInputPtr(0, i), indexer.GetOutputPtr(i)); } } /// Create num_threads workers to compute partial reductions and then reduce /// to the final results. This only applies to reduction op with one output. 
template <typename scalar_t, typename func_t>
void LaunchReductionKernelTwoPass(const Indexer& indexer,
                                  const func_t& func,
                                  scalar_t identity) {
    if (indexer.NumOutputElements() > 1) {
        utility::LogError(
                "Internal error: two-pass reduction only works for "
                "single-output reduction ops.");
    }
    int64_t num_workloads = indexer.NumWorkloads();
    int64_t num_threads = GetMaxThreads();
    // Ceiling division so the last thread picks up the remainder.
    int64_t workload_per_thread =
            (num_workloads + num_threads - 1) / num_threads;
    // Pass 1: each thread folds its contiguous range into its own slot,
    // starting from the reduction identity.
    std::vector<scalar_t> thread_results(num_threads, identity);

#pragma omp parallel for schedule(static)
    for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
        int64_t start = thread_idx * workload_per_thread;
        int64_t end = std::min(start + workload_per_thread, num_workloads);
        for (int64_t i = start; i < end; ++i) {
            func(indexer.GetInputPtr(0, i), &thread_results[thread_idx]);
        }
    }
    // Pass 2: serially fold the per-thread partials into the single output.
    void* output_ptr = indexer.GetOutputPtr(0);
    for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
        func(&thread_results[thread_idx], output_ptr);
    }
}

/// Parallelize a reduction over its largest (ideally >= num_threads)
/// non-reduction dimension; each slice along that dimension is reduced
/// serially by LaunchReductionKernelSerial.
template <typename scalar_t, typename func_t>
void LaunchReductionParallelDim(const Indexer& indexer, const func_t& func) {
    // Prefers outer dimension >= num_threads.
    const int64_t* indexer_shape = indexer.GetMasterShape();
    const int64_t num_dims = indexer.NumDims();
    int64_t num_threads = GetMaxThreads();

    // Init best_dim as the outer-most non-reduction dim.
    int64_t best_dim = num_dims - 1;
    while (best_dim >= 0 && indexer.IsReductionDim(best_dim)) {
        best_dim--;
    }
    // Scan the remaining non-reduction dims: take the first one large enough
    // to saturate the threads, otherwise the largest seen.
    for (int64_t dim = best_dim; dim >= 0 && !indexer.IsReductionDim(dim);
         --dim) {
        if (indexer_shape[dim] >= num_threads) {
            best_dim = dim;
            break;
        } else if (indexer_shape[dim] > indexer_shape[best_dim]) {
            best_dim = dim;
        }
    }
    // NOTE(review): relies on LogError aborting/throwing; otherwise
    // indexer_shape[-1] below would be out of bounds — confirm LogError
    // semantics.
    if (best_dim == -1) {
        utility::LogError(
                "Internal error: all dims are reduction dims, use "
                "LaunchReductionKernelTwoPass instead.");
    }

#pragma omp parallel for schedule(static)
    for (int64_t i = 0; i < indexer_shape[best_dim]; ++i) {
        // Each thread reduces an independent 1-wide slice along best_dim.
        Indexer sub_indexer(indexer);
        sub_indexer.ShrinkDim(best_dim, i, 1);
        LaunchReductionKernelSerial<scalar_t>(sub_indexer, func);
    }
}

}  // namespace cpu_launcher
}  // namespace kernel
}  // namespace core
}  // namespace open3d
solver-omp.c
#include "heat.h"
#include "omp.h"

/*
 * Copy the interior of matrix u into matrix v (the one-cell halo is
 * excluded).  Rows are distributed across threads.
 *
 * Note: sizex/sizey are unsigned, so "sizex - 2" would underflow to a huge
 * value for degenerate sizes < 2; we convert to int first so the loop
 * bounds stay signed and the loops simply do not execute in that case.
 */
void copy_mat (double *u, double *v, unsigned sizex, unsigned sizey)
{
    const int nx = (int) sizex;
    const int ny = (int) sizey;

    #pragma omp parallel for
    for (int i = 1; i <= nx - 2; i++)
        for (int j = 1; j <= ny - 2; j++)
            v[ i*ny+j ] = u[ i*ny+j ];
}

/*
 * Blocked Jacobi solver: one iteration step.
 *
 * u    : current iterate (read only)
 * utmp : next iterate (written)
 * Returns the sum of squared updates (residual measure for convergence).
 */
double relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey)
{
    double diff, sum = 0.0;
    int howmany = omp_get_max_threads();
    const int nx = (int) sizex;   /* signed copies: avoid unsigned underflow */
    const int ny = (int) sizey;

    /* One horizontal block of rows per thread; "diff" is per-point scratch
     * and must be private, "sum" is combined with a reduction. */
    #pragma omp parallel for private(diff) reduction(+:sum)
    for (int blockid = 0; blockid < howmany; ++blockid) {
        int i_start = lowerb(blockid, howmany, nx);
        int i_end   = upperb(blockid, howmany, nx);
        for (int i = max(1, i_start); i <= min(nx - 2, i_end); i++) {
            for (int j = 1; j <= ny - 2; j++) {
                utmp[i*ny+j] = 0.25 * ( u[ i*ny     + (j-1) ]+  // left
                                        u[ i*ny     + (j+1) ]+  // right
                                        u[ (i-1)*ny + j     ]+  // top
                                        u[ (i+1)*ny + j     ]); // bottom
                diff = utmp[i*ny+j] - u[i*ny + j];
                sum += diff * diff;
            }
        }
    }
    return sum;
}

/*
 * Blocked Gauss-Seidel solver: one iteration step.
 *
 * The grid is tiled into howmany x howmanyc blocks; the doacross
 * dependences (ordered(2) + depend(sink/source)) enforce the wavefront
 * order: block (bf, bc) waits for (bf-1, bc) and (bf, bc-1).
 *
 * BUG FIX: "unew" was left shared among threads in the parallel region
 * (only "diff" was privatized), producing a data race and corrupted
 * updates; it is now in the private clause.
 */
double relax_gauss (double *u, unsigned sizex, unsigned sizey)
{
    double unew, diff, sum = 0.0;
    int howmany  = omp_get_max_threads();
    int howmanyc = 4;
    const int nx = (int) sizex;   /* signed copies: avoid unsigned underflow */
    const int ny = (int) sizey;

    #pragma omp parallel for schedule(static) ordered(2) private(unew, diff) reduction(+:sum)
    for (int blockidf = 0; blockidf < howmany; ++blockidf) {
        for (int blockidc = 0; blockidc < howmanyc; ++blockidc) {
            int j_start = lowerb(blockidc, howmanyc, ny);
            int j_end   = upperb(blockidc, howmanyc, ny);
            int i_start = lowerb(blockidf, howmany, nx);
            int i_end   = upperb(blockidf, howmany, nx);

            /* wait for the blocks above and to the left */
            #pragma omp ordered depend(sink: blockidf-1, blockidc) depend(sink: blockidf, blockidc-1)
            for (int i = max(1, i_start); i <= min(nx - 2, i_end); i++) {
                for (int j = max(1, j_start); j <= min(ny - 2, j_end); j++) {
                    unew = 0.25 * ( u[ i*ny     + (j-1) ]+  // left
                                    u[ i*ny     + (j+1) ]+  // right
                                    u[ (i-1)*ny + j     ]+  // top
                                    u[ (i+1)*ny + j     ]); // bottom
                    diff = unew - u[i*ny + j];
                    sum += diff * diff;
                    u[i*ny+j] = unew;   /* in-place update (Gauss-Seidel) */
                }
            }
            /* signal that this block is done */
            #pragma omp ordered depend(source)
        }
    }
    return sum;
}
game.c
/* * Copyright (C) 2009 Raphael Kubo da Costa <kubito@gmail.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <assert.h> #include <pcre.h> #include <string.h> #include "config.h" #include "game.h" #include "mem.h" /** * Returns the string matched by the first subgroup pattern in a regular expression. * * This function is useful if the regular expression is used primarily to find * one single pattern inside the given string. * Please note that it's up to the programmer to specify a regular expression with * at least one subgroup. * * @param pattern The regular expression to use. * @param subject The string to be searched. * * @return The matched string. */ static char *__re_get_first_match(const char *pattern, const char *subject) { int erroffset; const char *error; char *match; int ovector[30]; int rc; pcre *re; re = pcre_compile(pattern, PCRE_NEWLINE_LF, &error, &erroffset, NULL); if (re == NULL) return NULL; rc = pcre_exec(re, NULL, subject, strlen(subject), 0, 0, ovector, 30); if (rc <= 0) return NULL; match = MEM_ALLOC_N(char, ovector[3] - ovector[2] + 1); strncpy(match, subject + ovector[2], ovector[3] - ovector[2]); pcre_free(re); return match; } /** * Parses the custom board file format. * * @param game Pointer to a Game structure. * @param board The file to read. * * @retval 0 The file was parsed correctly. * @retval 1 The file could not be parsed. 
*/ static int __parse_custom_format(Game *game, FILE *board) { char boardline_re[20]; char *endptr; char header_line[16]; size_t i, j; char *line; char *s; /* First line - "Rows:NNN" */ fgets(header_line, 16, board); s = __re_get_first_match("^Rows: *(\\d{1,10})$", header_line); if (!s) { free(s); return 1; } game->rows = (size_t) strtol(s, &endptr, 10); if (*endptr != '\0') { free(s); return 1; } else { free(s); } /* Second line - "Cols:NNN" */ fgets(header_line, 16, board); s = __re_get_first_match("^Cols: *(\\d{1,10})$", header_line); if (!s) { free(s); return 1; } game->cols = (size_t) strtol(s, &endptr, 10); if (*endptr != '\0') { free(s); return 1; } else { free(s); } /* Allocate memory for the board */ if (game->board) free(game->board); game->board = MEM_ALLOC_N(char, game->cols * game->rows); /* Read game->rows lines describing the board */ sprintf(boardline_re, "^([#.]{%zu})$", game->cols); line = MEM_ALLOC_N(char, game->cols + 2); for (i = 0; i < game->rows; i++) { fgets(line, game->cols + 2, board); s = __re_get_first_match(boardline_re, line); if (!s) { free(line); free(s); return 1; } for (j = 0; j < game->cols; j++) { if (s[j] == '#') game_set_alive(game, i, j); else game_set_dead(game, i, j); } free(s); } free(line); return 0; } /** * Analyzes a particular part of the game board and update its state to the next generation. * * @param t Pointer to a GameInfo structure. 
*/ void process_slice(GameInfo *tinfo) { char live_count; size_t row, col; #pragma omp parallel for private(row, live_count) shared(tinfo) default(none) for (col = 0; col < tinfo->game->cols; col++) { for (row = 0; row < tinfo->game->rows; row++) { live_count = 0; /* Count the living neighbour cells */ if (game_is_alive(tinfo->game, row, col + 1)) live_count++; if (game_is_alive(tinfo->game, row + 1, col)) live_count++; if (game_is_alive(tinfo->game, row + 1, col + 1)) live_count++; if (row > 0) { if (game_is_alive(tinfo->game, row - 1, col)) live_count++; if (game_is_alive(tinfo->game, row - 1, col + 1)) live_count++; } if (col > 0) { if (game_is_alive(tinfo->game, row, col - 1)) live_count++; if (game_is_alive(tinfo->game, row + 1, col - 1)) live_count++; } if ((row > 0) && (col > 0)) if (game_is_alive(tinfo->game, row - 1, col - 1)) live_count++; /* Apply the game's rules to the current cell */ if ((live_count < 2) || (live_count > 3)) tinfo->new_board[row * tinfo->game->cols + col] = 0; else if (live_count == 3) tinfo->new_board[row * tinfo->game->cols + col] = 1; else tinfo->new_board[row * tinfo->game->cols + col] = tinfo->game->board[row * tinfo->game->cols + col]; } } } void game_free(Game *game) { if (game) { if (game->board) free(game->board); free(game); } } int game_is_alive(Game *game, size_t row, size_t col) { assert(game); assert(game->board); if ((row >= game->rows) || (col >= game->cols)) return 0; return game->board[row * game->cols + col] == 1; } int game_is_dead(Game *game, size_t row, size_t col) { return !game_is_alive(game, row, col); } Game *game_new(void) { Game *game = MEM_ALLOC(Game); game->board = NULL; game->cols = 0; game->rows = 0; return game; } int game_parse_board(Game *game, GameConfig *config) { FILE *board; int exit_code; long input_file_pos; assert(game); assert(config); assert(config->input_file); board = config->input_file; input_file_pos = ftell(board); fseek(board, 0, SEEK_SET); exit_code = __parse_custom_format(game, 
board); fseek(board, input_file_pos, SEEK_SET); return exit_code; } void game_print_board(Game *game) { size_t col, row; assert(game); assert(game->board); for (row = 0; row < game->rows; row++) { for (col = 0; col < game->cols; col++) { printf("%c", game_is_alive(game, row, col) ? '#' : '.'); } printf("\n"); } } void game_set_alive(Game *game, size_t row, size_t col) { assert(game); assert(game->board); assert(row < game->rows); assert(col < game->cols); game->board[row * game->cols + col] = 1; } void game_set_dead(Game *game, size_t row, size_t col) { assert(game); assert(game->board); assert(row < game->rows); assert(col < game->cols); game->board[row * game->cols + col] = 0; } int game_tick(Game *game) { char *new_board; int retval = 0; GameInfo *tinfo; size_t tnum = 0; new_board = MEM_ALLOC_N(char, game->rows * game->cols); tinfo = MEM_ALLOC(GameInfo); tinfo->game = game; tinfo->new_board = new_board; process_slice(&tinfo[tnum]); /* Make game use the new board and drop the old one */ free(game->board); game->board = new_board; free(tinfo); return retval; }
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. } ..example.text:Make an addition atomic if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic i += 1; */ #ifdef _OPENMP #include <omp.h> #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // GCC _Pragma operator #define SEQAN_DO_PRAGMA(x) _Pragma(#x) #define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x) #else // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // MSVC __pragma-operator #define SEQAN_OMP_PRAGMA(x) __pragma (omp x) #endif // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) #else // #ifdef _OPENMP #define SEQAN_OMP_PRAGMA(x) // low-level OpenMP runtime compatibility inline void omp_set_num_threads(int) {} inline int omp_get_num_threads() { return 1; } inline int omp_get_max_threads() { return 1; } inline int omp_get_thread_num() { return 0; } #endif // #ifdef _OPENMP #endif // SEQAN_PARALLEL_PARALLEL_MACROS_H_
nmt_covar_flat.c
#include "config.h"
#include "utils.h"

/* Multiply two mask maps pixel-wise and return the Fourier transform of the
 * product. The returned array holds ny*(nx/2+1) complex coefficients
 * (real-to-complex FFT layout); caller frees it with dftw_free. */
static fcomplex *product_and_transform(nmt_flatsky_info *fs,flouble *m1,flouble *m2)
{
  flouble *m12=dftw_malloc(fs->npix*sizeof(flouble));
  fs_map_product(fs,m1,m2,m12);

  fcomplex *cm12=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  fs_map2alm(fs,1,0,&m12,&cm12);

  dftw_free(m12);
  return cm12;
}

/* Deep copy of a flat-sky binning scheme (band count plus both edge lists).
 * Caller owns the result. */
static nmt_binning_scheme_flat *nmt_bins_copy(nmt_binning_scheme_flat *b_or)
{
  nmt_binning_scheme_flat *b=my_malloc(sizeof(nmt_binning_scheme_flat));
  b->n_bands=b_or->n_bands;
  b->ell_0_list=my_malloc(b->n_bands*sizeof(flouble));
  memcpy(b->ell_0_list,b_or->ell_0_list,b->n_bands*sizeof(flouble));
  b->ell_f_list=my_malloc(b->n_bands*sizeof(flouble));
  memcpy(b->ell_f_list,b_or->ell_f_list,b->n_bands*sizeof(flouble));
  return b;
}

/* Build a flat-sky covariance workspace from two field pairs (a1,a2) and
 * (b1,b2). All four fields must share the same pixelization, and both
 * binning schemes must have the same number of bands. The workspace holds
 * the mode-coupling "Xi" matrices (spin combinations 00/02/22+/22-) for the
 * two mask orderings 1122 and 1221. Aborts via report_error on mismatch. */
nmt_covar_workspace_flat *nmt_covar_workspace_flat_init(nmt_field_flat *fla1,nmt_field_flat *fla2,
							nmt_binning_scheme_flat *ba,
							nmt_field_flat *flb1,nmt_field_flat *flb2,
							nmt_binning_scheme_flat *bb)
{
  int ii;
  if((fla1->fs->nx!=fla2->fs->nx) || (fla1->fs->ny!=fla2->fs->ny) ||
     (fla1->fs->nx!=flb1->fs->nx) || (fla1->fs->ny!=flb1->fs->ny) ||
     (fla1->fs->nx!=flb2->fs->nx) || (fla1->fs->ny!=flb2->fs->ny))
    report_error(NMT_ERROR_COVAR,"Can't compute covariance for fields with different resolutions\n");
  nmt_flatsky_info *fs=fla1->fs;
  if(ba->n_bands!=bb->n_bands)
    report_error(NMT_ERROR_COVAR,"Can't compute covariance for different binning schemes\n");

  nmt_covar_workspace_flat *cw=my_malloc(sizeof(nmt_covar_workspace_flat));
  cw->bin=nmt_bins_copy(ba);

  /* One n_bands x n_bands matrix per spin combination and mask ordering. */
  cw->xi00_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi00_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi02_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi02_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi22p_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi22p_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi22m_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  cw->xi22m_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
  for(ii=0;ii<cw->bin->n_bands;ii++) {
    cw->xi00_1122[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi00_1221[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi02_1122[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi02_1221[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi22p_1122[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi22p_1221[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi22m_1122[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    cw->xi22m_1221[ii]=my_calloc(cw->bin->n_bands,sizeof(flouble));
  }
  /* Number of Fourier cells falling in each band (for normalization). */
  int *n_cells=my_calloc(cw->bin->n_bands,sizeof(int));

  //Multiply masks and Fourier-transform
  fcomplex *cm_a1b1=product_and_transform(fs,fla1->mask,flb1->mask);
  fcomplex *cm_a1b2=product_and_transform(fs,fla1->mask,flb2->mask);
  fcomplex *cm_a2b1=product_and_transform(fs,fla2->mask,flb1->mask);
  fcomplex *cm_a2b2=product_and_transform(fs,fla2->mask,flb2->mask);

  //Compute squared-mask power spectra
  /* NOTE(review): these buffers are typed flouble* but sized with
   * sizeof(double); if flouble is ever float this over-allocates (harmless
   * but inconsistent) -- confirm against the definition of flouble. */
  int *i_band,*i_band_nocut;
  flouble *cl_mask_1122=my_malloc(fs->npix*sizeof(double));
  flouble *cl_mask_1221=my_malloc(fs->npix*sizeof(double));
  flouble *cosarr=dftw_malloc(fs->npix*sizeof(double));
  flouble *sinarr=dftw_malloc(fs->npix*sizeof(double));
  i_band=my_malloc(fs->npix*sizeof(int));
  i_band_nocut=my_malloc(fs->npix*sizeof(int));

  /* Pass 1: for every 2D Fourier cell, record its band index, the two
   * mask cross-spectra (Re*Re+Im*Im of the r2c coefficients) and the
   * cos/sin of twice the wavevector angle (used for spin-2 rotations). */
#pragma omp parallel default(none)			\
  shared(cw,fs,cm_a1b1,cm_a1b2,cm_a2b1,cm_a2b2,n_cells)	\
  shared(i_band_nocut,i_band,cl_mask_1122,cl_mask_1221)	\
  shared(cosarr,sinarr)
  {
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
    /* Per-thread band counts, merged under a critical section below. */
    int *n_cells_thr=my_calloc(cw->bin->n_bands,sizeof(int));
    int iy1,ix1;

#pragma omp for
    for(iy1=0;iy1<fs->ny;iy1++) {
      flouble ky;
      int ik=0;
      /* Map row index to a signed ky frequency. */
      if(2*iy1<=fs->ny)
	ky=iy1*dky;
      else
	ky=-(fs->ny-iy1)*dky;
      for(ix1=0;ix1<fs->nx;ix1++) {
	flouble kx,kmod,c,s;
	int ix_here,index_here,index;
	index=ix1+fs->nx*iy1;
	if(2*ix1<=fs->nx) {
	  kx=ix1*dkx;
	  ix_here=ix1;
	}
	else {
	  kx=-(fs->nx-ix1)*dkx;
	  /* Negative kx maps onto the conjugate half of the r2c array. */
	  ix_here=fs->nx-ix1;
	}
	index_here=ix_here+(fs->nx/2+1)*iy1;

	cl_mask_1122[index]=(creal(cm_a1b1[index_here])*creal(cm_a2b2[index_here])+
			     cimag(cm_a1b1[index_here])*cimag(cm_a2b2[index_here]));
	cl_mask_1221[index]=(creal(cm_a1b2[index_here])*creal(cm_a2b1[index_here])+
			     cimag(cm_a1b2[index_here])*cimag(cm_a2b1[index_here]));

	kmod=sqrt(kx*kx+ky*ky);
	/* ik is carried over as a search hint across consecutive cells. */
	ik=nmt_bins_flat_search_fast(cw->bin,kmod,ik);
	if(ik>=0) {
	  i_band[index]=ik;
	  n_cells_thr[ik]++;
	}
	else
	  i_band[index]=-1;
	i_band_nocut[index]=ik;

	if(kmod>0) {
	  c=kx/kmod;
	  s=ky/kmod;
	}
	else {
	  c=1.;
	  s=0.;
	}
	/* Double-angle values: cos(2*phi_k) and sin(2*phi_k). */
	cosarr[index]=c*c-s*s;
	sinarr[index]=2*s*c;
      }
    } //end omp for
#pragma omp critical
    {
      for(iy1=0;iy1<cw->bin->n_bands;iy1++)
	n_cells[iy1]+=n_cells_thr[iy1];
    } //end omp critical
    free(n_cells_thr);
  } //end omp parallel
  dftw_free(cm_a1b1);
  dftw_free(cm_a1b2);
  dftw_free(cm_a2b1);
  dftw_free(cm_a2b2);

  //Compute Xis
  /* Pass 2: O(npix^2) accumulation of the Xi matrices, binned by the
   * band indices of the two cells. Each thread accumulates into private
   * matrices and merges them under a critical section. */
#pragma omp parallel default(none)			\
  shared(fs,i_band,cw,cl_mask_1122,cl_mask_1221)	\
  shared(cosarr,sinarr)
  {
    int iy1,ix1,ix2,iy2;
    flouble **xi00_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi00_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi02_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi02_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi22p_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi22p_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi22m_1122=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    flouble **xi22m_1221=my_malloc(cw->bin->n_bands*sizeof(flouble *));
    for(iy1=0;iy1<cw->bin->n_bands;iy1++) {
      xi00_1122[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi00_1221[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi02_1122[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi02_1221[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi22p_1122[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi22p_1221[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi22m_1122[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
      xi22m_1221[iy1]=my_calloc(cw->bin->n_bands,sizeof(flouble));
    }

#pragma omp for
    for(iy1=0;iy1<fs->ny;iy1++) {
      for(ix1=0;ix1<fs->nx;ix1++) {
	int index1=ix1+fs->nx*iy1;
	int ik1=i_band[index1];
	if(ik1>=0) {
	  for(iy2=0;iy2<fs->ny;iy2++) {
	    for(ix2=0;ix2<fs->nx;ix2++) {
	      int index,index2=ix2+fs->nx*iy2;
	      int ik2=i_band[index2];
	      flouble cdiff=1,sdiff=0;
	      /* Periodic (wrap-around) difference vector k1-k2 indexes
	       * the mask power spectra. */
	      int iy=iy1-iy2;
	      int ix=ix1-ix2;
	      if(iy<0)
		iy+=fs->ny;
	      if(ix<0)
		ix+=fs->nx;
	      index=ix+fs->nx*iy;
	      if(ik2>=0) {
		double clm1122=cl_mask_1122[index];
		double clm1221=cl_mask_1221[index];
		/* cos/sin of 2*(phi_1 - phi_2) via angle-difference ids. */
		cdiff=cosarr[index1]*cosarr[index2]+sinarr[index1]*sinarr[index2];
		sdiff=sinarr[index1]*cosarr[index2]-cosarr[index1]*sinarr[index2];
		xi00_1122[ik1][ik2]+=clm1122;
		xi00_1221[ik1][ik2]+=clm1221;
		xi02_1122[ik1][ik2]+=clm1122*cdiff;
		xi02_1221[ik1][ik2]+=clm1221*cdiff;
		xi22p_1122[ik1][ik2]+=clm1122*cdiff*cdiff;
		xi22p_1221[ik1][ik2]+=clm1221*cdiff*cdiff;
		xi22m_1122[ik1][ik2]+=clm1122*sdiff*sdiff;
		xi22m_1221[ik1][ik2]+=clm1221*sdiff*sdiff;
	      }
	    }
	  }
	}
      }
    } //end omp for

#pragma omp critical
    {
      for(iy1=0;iy1<cw->bin->n_bands;iy1++) {
	for(iy2=0;iy2<cw->bin->n_bands;iy2++) {
	  cw->xi00_1122[iy1][iy2]+=xi00_1122[iy1][iy2];
	  cw->xi00_1221[iy1][iy2]+=xi00_1221[iy1][iy2];
	  cw->xi02_1122[iy1][iy2]+=xi02_1122[iy1][iy2];
	  cw->xi02_1221[iy1][iy2]+=xi02_1221[iy1][iy2];
	  cw->xi22p_1122[iy1][iy2]+=xi22p_1122[iy1][iy2];
	  cw->xi22p_1221[iy1][iy2]+=xi22p_1221[iy1][iy2];
	  cw->xi22m_1122[iy1][iy2]+=xi22m_1122[iy1][iy2];
	  cw->xi22m_1221[iy1][iy2]+=xi22m_1221[iy1][iy2];
	}
      }
    } //end omp critical
    for(iy1=0;iy1<cw->bin->n_bands;iy1++) {
      free(xi00_1122[iy1]);
      free(xi00_1221[iy1]);
      free(xi02_1122[iy1]);
      free(xi02_1221[iy1]);
      free(xi22p_1122[iy1]);
      free(xi22p_1221[iy1]);
      free(xi22m_1122[iy1]);
      free(xi22m_1221[iy1]);
    }
    free(xi00_1122);
    free(xi00_1221);
    free(xi02_1122);
    free(xi02_1221);
    free(xi22p_1122);
    free(xi22p_1221);
    free(xi22m_1122);
    free(xi22m_1221);
  } //end omp parallel

  /* Pass 3: normalize each band pair by the cell counts and the area
   * factor (2*pi)^2/(lx*ly) squared. */
#pragma omp parallel default(none)		\
  shared(fs,cw,n_cells)
  {
    int ib1;
    flouble fac_norm=4*M_PI*M_PI/(fs->lx*fs->lx*fs->ly*fs->ly);
#pragma omp for
    for(ib1=0;ib1<cw->bin->n_bands;ib1++) {
      int ib2;
      for(ib2=0;ib2<cw->bin->n_bands;ib2++) {
	flouble norm;
	if(n_cells[ib1]*n_cells[ib2]>0)
	  norm=fac_norm/(n_cells[ib1]*n_cells[ib2]);
	else
	  norm=0;
	cw->xi00_1122[ib1][ib2]*=norm;
	cw->xi00_1221[ib1][ib2]*=norm;
	cw->xi02_1122[ib1][ib2]*=norm;
	cw->xi02_1221[ib1][ib2]*=norm;
	cw->xi22p_1122[ib1][ib2]*=norm;
	cw->xi22p_1221[ib1][ib2]*=norm;
	cw->xi22m_1122[ib1][ib2]*=norm;
	cw->xi22m_1221[ib1][ib2]*=norm;
      }
    } //end omp for
  } //end omp parallel

  free(i_band);
  free(i_band_nocut);
  free(cl_mask_1122);
  free(cl_mask_1221);
  dftw_free(cosarr);
  dftw_free(sinarr);
  free(n_cells);

  return cw;
}

/* Release every Xi matrix, the binning copy and the workspace itself. */
void nmt_covar_workspace_flat_free(nmt_covar_workspace_flat *cw)
{
  int ii;
  for(ii=0;ii<cw->bin->n_bands;ii++) {
    free(cw->xi00_1122[ii]);
    free(cw->xi00_1221[ii]);
    free(cw->xi02_1122[ii]);
    free(cw->xi02_1221[ii]);
    free(cw->xi22p_1122[ii]);
    free(cw->xi22p_1221[ii]);
    free(cw->xi22m_1122[ii]);
    free(cw->xi22m_1221[ii]);
  }
  free(cw->xi00_1122);
  free(cw->xi00_1221);
  free(cw->xi02_1122);
  free(cw->xi02_1221);
  free(cw->xi22p_1122);
  free(cw->xi22p_1221);
  free(cw->xi22m_1122);
  free(cw->xi22m_1221);
  nmt_bins_flat_free(cw->bin);
  free(cw);
}

/* Compute the Gaussian covariance of two sets of binned power spectra
 * (fields a x b vs c x d) given the precomputed Xi workspace, the two
 * mode-coupling workspaces and the input theory spectra cl{ac,ad,bc,bd}
 * sampled at nl multipoles larr. The flattened result is written into
 * covar_out (row-major, (ncls_a*n_bands) x (ncls_b*n_bands)).
 * Aborts via report_error if the workspaces do not match. */
void nmt_compute_gaussian_covariance_flat(nmt_covar_workspace_flat *cw,
					  int spin_a,int spin_b,int spin_c,int spin_d,
					  nmt_workspace_flat *wa,nmt_workspace_flat *wb,
					  int nl,flouble *larr,
					  flouble **clac,flouble **clad,
					  flouble **clbc,flouble **clbd,flouble *covar_out)
{
  if((wa->bin->n_bands!=cw->bin->n_bands) || (wb->bin->n_bands!=cw->bin->n_bands))
    report_error(NMT_ERROR_COVAR,"Coupling coefficients were computed for a different binning scheme\n");
  /* Spin-0 fields carry 1 map, spin-2 fields carry 2 (E/B-like). */
  int nmaps_a=spin_a ? 2 : 1;
  int nmaps_b=spin_b ? 2 : 1;
  int nmaps_c=spin_c ? 2 : 1;
  int nmaps_d=spin_d ? 2 : 1;
  if((wa->ncls!=nmaps_a*nmaps_b) || (wb->ncls!=nmaps_c*nmaps_d))
    report_error(NMT_ERROR_COVAR,"Input spins don't match input workspaces\n");

  //Compute binned spectra
  int i_cl;
  flouble **cblac=my_malloc(nmaps_a*nmaps_c*sizeof(flouble *));
  for(i_cl=0;i_cl<nmaps_a*nmaps_c;i_cl++)
    cblac[i_cl]=my_malloc(cw->bin->n_bands*sizeof(flouble));
  nmt_bin_cls_flat(cw->bin,nl,larr,clac,cblac,nmaps_a*nmaps_c);
  flouble **cblad=my_malloc(nmaps_a*nmaps_d*sizeof(flouble *));
  for(i_cl=0;i_cl<nmaps_a*nmaps_d;i_cl++)
    cblad[i_cl]=my_malloc(cw->bin->n_bands*sizeof(flouble));
  nmt_bin_cls_flat(cw->bin,nl,larr,clad,cblad,nmaps_a*nmaps_d);
  flouble **cblbc=my_malloc(nmaps_b*nmaps_c*sizeof(flouble *));
  for(i_cl=0;i_cl<nmaps_b*nmaps_c;i_cl++)
    cblbc[i_cl]=my_malloc(cw->bin->n_bands*sizeof(flouble));
  nmt_bin_cls_flat(cw->bin,nl,larr,clbc,cblbc,nmaps_b*nmaps_c);
  flouble **cblbd=my_malloc(nmaps_b*nmaps_d*sizeof(flouble *));
  for(i_cl=0;i_cl<nmaps_b*nmaps_d;i_cl++)
    cblbd[i_cl]=my_malloc(cw->bin->n_bands*sizeof(flouble));
  nmt_bin_cls_flat(cw->bin,nl,larr,clbd,cblbd,nmaps_b*nmaps_d);

  //Convolve with Xi
  gsl_matrix *covar_binned=gsl_matrix_alloc(wa->ncls*cw->bin->n_bands,wb->ncls*cw->bin->n_bands);
#pragma omp parallel default(none)		\
  shared(cw,spin_a,spin_b,spin_c,spin_d)	\
  shared(wa,wb,nl,larr,covar_binned)		\
  shared(nmaps_a,nmaps_b,nmaps_c,nmaps_d)	\
  shared(cblac,cblad,cblbc,cblbd)
  {
    int band_a;
#pragma omp for
    for(band_a=0;band_a<cw->bin->n_bands;band_a++) {
      int band_b;
      for(band_b=0;band_b<cw->bin->n_bands;band_b++) {
	int ia;
	/* Xi values indexed by coupling-pair type (last entries: -xi22m, 0). */
	double xis_1122[6]={cw->xi00_1122[band_a][band_b],cw->xi02_1122[band_a][band_b],
			    cw->xi22p_1122[band_a][band_b],cw->xi22m_1122[band_a][band_b],
			    -cw->xi22m_1122[band_a][band_b],0};
	double xis_1221[6]={cw->xi00_1221[band_a][band_b],cw->xi02_1221[band_a][band_b],
			    cw->xi22p_1221[band_a][band_b],cw->xi22m_1221[band_a][band_b],
			    -cw->xi22m_1221[band_a][band_b],0};
	for(ia=0;ia<nmaps_a;ia++) {
	  int ib;
	  for(ib=0;ib<nmaps_b;ib++) {
	    int ic;
	    int icl_a=ib+nmaps_b*ia;
	    int index_a=wa->ncls*band_a+icl_a;
	    for(ic=0;ic<nmaps_c;ic++) {
	      int id;
	      for(id=0;id<nmaps_d;id++) {
		int iap;
		int icl_b=id+nmaps_d*ic;
		int index_b=wb->ncls*band_b+icl_b;
		double cbinned=0;
		/* Sum over all primed map indices (Wick contractions). */
		for(iap=0;iap<nmaps_a;iap++) {
		  int ibp;
		  for(ibp=0;ibp<nmaps_b;ibp++) {
		    int icp;
		    for(icp=0;icp<nmaps_c;icp++) {
		      int idp;
		      for(idp=0;idp<nmaps_d;idp++) {
			double *cl_ac=cblac[icp+nmaps_c*iap];
			double *cl_ad=cblad[idp+nmaps_d*iap];
			double *cl_bc=cblbc[icp+nmaps_c*ibp];
			double *cl_bd=cblbd[idp+nmaps_d*ibp];
			/* Symmetrized products for the two contractions. */
			double fac_1122=0.5*(cl_ac[band_a]*cl_bd[band_b]+cl_ac[band_b]*cl_bd[band_a]);
			double fac_1221=0.5*(cl_ad[band_a]*cl_bc[band_b]+cl_ad[band_b]*cl_bc[band_a]);
			int ind_1122=cov_get_coupling_pair_index(nmaps_a,nmaps_c,nmaps_b,nmaps_d,
								 ia,iap,ic,icp,ib,ibp,id,idp);
			int ind_1221=cov_get_coupling_pair_index(nmaps_a,nmaps_d,nmaps_b,nmaps_c,
								 ia,iap,id,idp,ib,ibp,ic,icp);
			cbinned+=xis_1122[ind_1122]*fac_1122+xis_1221[ind_1221]*fac_1221;
		      }
		    }
		  }
		}
		gsl_matrix_set(covar_binned,index_a,index_b,cbinned);
	      }
	    }
	  }
	}
      }
    } //end omp for
  } //end omp parallel

  //Sandwich with inverse MCM
  gsl_matrix *covar_out_g =gsl_matrix_alloc(wa->ncls*cw->bin->n_bands,wb->ncls*cw->bin->n_bands);
  gsl_matrix *mat_tmp     =gsl_matrix_alloc(wa->ncls*cw->bin->n_bands,wb->ncls*cw->bin->n_bands);
  gsl_matrix *inverse_a   =gsl_matrix_alloc(wa->ncls*cw->bin->n_bands,wa->ncls*cw->bin->n_bands);
  gsl_matrix *inverse_b   =gsl_matrix_alloc(wb->ncls*cw->bin->n_bands,wb->ncls*cw->bin->n_bands);
  gsl_linalg_LU_invert(wb->coupling_matrix_binned_gsl,wb->coupling_matrix_perm,inverse_b); //M_b^-1
  gsl_linalg_LU_invert(wa->coupling_matrix_binned_gsl,wa->coupling_matrix_perm,inverse_a); //M_a^-1
  gsl_blas_dgemm(CblasNoTrans,CblasTrans  ,1,covar_binned,inverse_b,0,mat_tmp    ); //tmp = C * M_b^-1^T
  gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1,inverse_a   ,mat_tmp  ,0,covar_out_g); //C' = M_a^-1 * C * M_b^-1^T

  //Flatten
  int ii;
  long elem=0;
  for(ii=0;ii<wa->ncls*cw->bin->n_bands;ii++) {
    int jj;
    for(jj=0;jj<wb->ncls*cw->bin->n_bands;jj++) {
      covar_out[elem]=gsl_matrix_get(covar_out_g,ii,jj);
      elem++;
    }
  }

  for(i_cl=0;i_cl<nmaps_a*nmaps_c;i_cl++)
    free(cblac[i_cl]);
  free(cblac);
  for(i_cl=0;i_cl<nmaps_a*nmaps_d;i_cl++)
    free(cblad[i_cl]);
  free(cblad);
  for(i_cl=0;i_cl<nmaps_b*nmaps_c;i_cl++)
    free(cblbc[i_cl]);
  free(cblbc);
  for(i_cl=0;i_cl<nmaps_b*nmaps_d;i_cl++)
    free(cblbd[i_cl]);
  free(cblbd);
  gsl_matrix_free(mat_tmp);
  gsl_matrix_free(inverse_a);
  gsl_matrix_free(inverse_b);
  gsl_matrix_free(covar_out_g);
  gsl_matrix_free(covar_binned);
}
kmeans_utils.c
#include "kmeans_utils.h" #include <math.h> #include <float.h> #include "kmeans.h" #include "../../utils/fcl_logging.h" #include "../../utils/fcl_time.h" #include "../../utils/matrix/csr_matrix/csr_to_vector_list.h" #include "../../utils/matrix/csr_matrix/csr_math.h" #include "../../utils/matrix/vector_list/vector_list_math.h" #include "../../utils/matrix/csr_matrix/csr_assign.h" #include "../../utils/matrix/vector_list/vector_list_to_csr.h" #include "../../utils/matrix/vector_list/vector_list.h" #include "../../utils/vector/sparse/sparse_vector_math.h" #define UPDATE_TYPE_KMEANS UINT32_C(0) #define UPDATE_TYPE_MINIBATCH_KMEANS UINT32_C(1) typedef void (*kmeans_init_function) (struct general_kmeans_context* ctx , struct kmeans_params *prms); typedef void (*kmeans_preinit_function) (struct csr_matrix *mtrx, struct kmeans_params *prms); kmeans_init_function KMEANS_INIT_FUNCTIONS[NO_KMEANS_INITS] \ = {initialize_kmeans_random, initialize_kmeans_pp, initialize_kmeans_init_params}; kmeans_preinit_function KMEANS_PREINIT_FUNCTIONS[NO_KMEANS_INITS] \ = {NULL, NULL, preinitialize_kmeans_init_params}; void get_kmeanspp_assigns(struct csr_matrix *mtrx , struct csr_matrix *blockvectors_mtrx , struct sparse_vector* pca_projection_samples , VALUE_TYPE *sparse_vector_lengths , VALUE_TYPE *pca_sparse_vector_lengths , uint64_t no_clusters , uint64_t *cluster_counts , uint64_t *cluster_assignments , VALUE_TYPE *cluster_distances , uint32_t* seed , uint64_t use_triangle_inequality , uint64_t *initial_cluster_samples , uint32_t verbose , struct cdict* tr , uint32_t* stop); void free_general_context(struct general_kmeans_context* ctx , struct kmeans_params *prms) { free_vector_list(ctx->cluster_vectors, ctx->no_clusters); free_null(ctx->cluster_vectors); free_cluster_hashmaps(ctx->clusters_raw, ctx->no_clusters); free_null(ctx->clusters_raw); free_null(ctx->cluster_distances); free_null(ctx->cluster_assignments); free_null(ctx->initial_cluster_samples); free_null(ctx->cluster_counts); 
free_null(ctx->vector_lengths_samples); free_null(ctx->vector_lengths_clusters); free_null(ctx->clusters_not_changed); free_null(ctx->was_assigned); free_null(ctx->previous_cluster_assignments); } void free_kmeans_result(struct kmeans_result* res) { if (res->clusters != NULL) { free_csr_matrix(res->clusters); free_null(res->clusters); } free_init_params(res->initprms); free_null(res->initprms); free_null(res); } void initialize_kmeans_random(struct general_kmeans_context* ctx, struct kmeans_params *prms) { uint64_t i; /* use random samples as clusters */ create_vector_list_random(ctx->samples , ctx->cluster_vectors , ctx->no_clusters , &(prms->seed) , ctx->initial_cluster_samples); /* assign a random cluster to every sample */ for (i = 0; i < ctx->samples->sample_count; i++) { ctx->cluster_assignments[i] = i % ctx->no_clusters; } } void preinitialize_kmeans_init_params(struct csr_matrix *samples, struct kmeans_params *prms) { uint64_t i; uint32_t no_clusters; no_clusters = 0; if (prms->initprms == NULL) { if (prms->verbose) LOG_ERROR("Initprms are empty"); goto error_invalid_init_data; } if (prms->initprms->assignments == NULL) { if (prms->verbose) LOG_ERROR("Init assignments are empty"); goto error_invalid_init_data; } if (prms->initprms->len_assignments != samples->sample_count) { if (prms->verbose) LOG_ERROR("Init assignments have invalid length %" PRINTF_INT64_MODIFIER "u != %" PRINTF_INT64_MODIFIER "u", prms->initprms->len_assignments, samples->sample_count); goto error_invalid_init_data; } for (i = 0; i < samples->sample_count; i++) { if (prms->initprms->assignments[i] > no_clusters) { no_clusters = prms->initprms->assignments[i]; } } no_clusters += 1; if (no_clusters > prms->initprms->len_initial_cluster_samples) { if (prms->verbose) LOG_ERROR("no_clusters > len_initial_cluster_samples"); goto error_invalid_init_data; } if (prms->initprms->len_initial_cluster_samples > no_clusters) { no_clusters = prms->initprms->len_initial_cluster_samples; } for (i = 0; i 
< prms->initprms->len_initial_cluster_samples; i++) { if (prms->initprms->initial_cluster_samples[i] >= samples->sample_count) { if (prms->verbose) LOG_ERROR("prms->initprms->initial_cluster_samples[i] >= samples->sample_count"); goto error_invalid_init_data; } } goto init_data_correct; error_invalid_init_data: if (prms->verbose) LOG_ERROR("Invalid assignment_list data. Using random init instead with k=10."); prms->init_id = KMEANS_INIT_RANDOM; prms->no_clusters = 10; return; init_data_correct: prms->no_clusters = no_clusters; } void initialize_kmeans_init_params(struct general_kmeans_context* ctx, struct kmeans_params *prms) { uint64_t i; KEY_TYPE *keys; VALUE_TYPE *values; uint64_t nnz; for (i = 0; i < ctx->samples->sample_count; i++) { ctx->cluster_assignments[i] = prms->initprms->assignments[i]; ctx->cluster_counts[ctx->cluster_assignments[i]] += 1; } for (i = 0; i < ctx->samples->sample_count; i++) { keys = ctx->samples->keys + ctx->samples->pointers[i]; values = ctx->samples->values + ctx->samples->pointers[i]; nnz = ctx->samples->pointers[i + 1] - ctx->samples->pointers[i]; add_sample_to_hashmap(ctx->clusters_raw, keys, values, nnz, ctx->cluster_assignments[i]); ctx->was_assigned[i] = 1; } for (i = 0; i < prms->no_clusters; i++) { HASH_SORT(ctx->clusters_raw[i], id_sort); } create_vector_list_from_hashmap(ctx->clusters_raw , ctx->cluster_counts , ctx->cluster_vectors , ctx->no_clusters); for (i = 0; i < prms->initprms->len_initial_cluster_samples; i++) { ctx->initial_cluster_samples[i] = prms->initprms->initial_cluster_samples[i]; } } void initialize_kmeans_pp(struct general_kmeans_context* ctx, struct kmeans_params *prms) { uint64_t i; KEY_TYPE *keys; VALUE_TYPE *values; uint64_t nnz; uint64_t use_triangle_inequality; /* Block vector variables */ uint64_t block_vectors_dim; /* size of block vectors */ VALUE_TYPE desired_bv_annz; /* desired size of the block vectors */ struct csr_matrix block_vectors_samples; /* block vector matrix of samples */ /* PCA 
variables */ uint64_t use_pca; /* desired size of the block vectors */ VALUE_TYPE* vector_lengths_pca_samples; struct sparse_vector* pca_projection_samples; pca_projection_samples = NULL; vector_lengths_pca_samples = NULL; block_vectors_samples.sample_count = 0; desired_bv_annz = d_get_subfloat_default(&(prms->tr) , "additional_params", "kmpp_bv_annz", 0); use_triangle_inequality = d_get_subint_default(&(prms->tr) , "additional_params", "kmpp_use_triangle_inequality", 0); use_pca = d_get_subint_default(&(prms->tr) , "additional_params", "kmpp_use_pca", 0); if (use_pca && prms->ext_vects != NULL) { if (prms->verbose) LOG_INFO("kmeans++ pca activated"); pca_projection_samples = matrix_dot(ctx->samples, prms->ext_vects); calculate_vector_list_lengths(pca_projection_samples, ctx->samples->sample_count, &vector_lengths_pca_samples); } if (desired_bv_annz > 0) { if (prms->verbose) LOG_INFO("kmeans++ block vectors activated with annz: %.3f", desired_bv_annz); determine_block_vectors_for_matrix(ctx->samples , desired_bv_annz , &block_vectors_samples , &block_vectors_dim); if (prms->verbose) LOG_INFO("kmeans++ done getting block vector matrix"); } get_kmeanspp_assigns(ctx->samples , &block_vectors_samples , pca_projection_samples , ctx->vector_lengths_samples , vector_lengths_pca_samples , prms->no_clusters , ctx->cluster_counts , ctx->cluster_assignments , ctx->cluster_distances , &(prms->seed) , use_triangle_inequality , ctx->initial_cluster_samples , prms->verbose , prms->tr , &(prms->stop)); if (desired_bv_annz > 0) { free_csr_matrix(&block_vectors_samples); } if (use_pca && prms->ext_vects != NULL) { free_vector_list(pca_projection_samples, ctx->samples->sample_count); free(vector_lengths_pca_samples); free(pca_projection_samples); } for (i = 0; i < ctx->samples->sample_count; i++) { keys = ctx->samples->keys + ctx->samples->pointers[i]; values = ctx->samples->values + ctx->samples->pointers[i]; nnz = ctx->samples->pointers[i + 1] - ctx->samples->pointers[i]; 
add_sample_to_hashmap(ctx->clusters_raw, keys, values, nnz, ctx->cluster_assignments[i]); ctx->was_assigned[i] = 1; } for (i = 0; i < prms->no_clusters; i++) { HASH_SORT(ctx->clusters_raw[i], id_sort); } create_vector_list_from_hashmap(ctx->clusters_raw , ctx->cluster_counts , ctx->cluster_vectors , ctx->no_clusters); } void get_kmeanspp_assigns(struct csr_matrix *mtrx , struct csr_matrix *blockvectors_mtrx , struct sparse_vector* pca_projection_samples , VALUE_TYPE *sparse_vector_lengths , VALUE_TYPE *pca_sparse_vector_lengths , uint64_t no_clusters , uint64_t *cluster_counts , uint64_t *cluster_assignments , VALUE_TYPE *cluster_distances , uint32_t* seed , uint64_t use_triangle_inequality , uint64_t *initial_cluster_samples , uint32_t verbose , struct cdict* tr , uint32_t* stop) { uint64_t no_clusters_so_far, i, j, calcs_skipped_tr, calcs_skipped_bv, calcs_skipped_pca, calcs_skipped_is_cluster; VALUE_TYPE rand_max; VALUE_TYPE approximated_full_distance_calcs_bv; VALUE_TYPE approximated_full_distance_calcs_pca; uint64_t mtrx_annz; uint64_t calcs_needed; uint64_t *is_cluster; VALUE_TYPE min_distances_cluster_new_cluster; time_t start; no_clusters_so_far = 0; calcs_needed = 0; rand_max = RAND_MAX; approximated_full_distance_calcs_bv = 0; approximated_full_distance_calcs_pca = 0; calcs_skipped_tr = 0; calcs_skipped_bv = 0; calcs_skipped_pca = 0; calcs_skipped_is_cluster = 0; start = time(NULL); is_cluster = (uint64_t*) calloc(mtrx->sample_count, sizeof(uint64_t)); /* choose the first sample randomly from all samples */ initial_cluster_samples[no_clusters_so_far] = rand_r(seed) % mtrx->sample_count; no_clusters_so_far += 1; /* initialize all distances with infinity */ for (i = 0; i < mtrx->sample_count; i++) { cluster_distances[i] = DBL_MAX; } min_distances_cluster_new_cluster = 0; while (no_clusters_so_far <= no_clusters && !(*stop)) { uint64_t cluster_id; VALUE_TYPE sum; if (no_clusters_so_far % 500 == 0) { if (verbose) LOG_INFO("kmeans++ chosen_clusters so far: %" 
PRINTF_INT64_MODIFIER "u (%d secs).. %" PRINTF_INT64_MODIFIER "u %" PRINTF_INT64_MODIFIER "u", no_clusters_so_far, (int) (time(NULL) - start), calcs_skipped_tr, calcs_skipped_bv); start = time(NULL); } /* no need to iterate over all clusters, only the recently added cluster is new info */ cluster_id = initial_cluster_samples[no_clusters_so_far - 1]; #pragma omp parallel for schedule(dynamic, 1000) for (i = 0; i < mtrx->sample_count; i++) { uint64_t sample_id; VALUE_TYPE dist; sample_id = i; if (omp_get_thread_num() == 0) check_signals(stop); if (!(*stop)) { if (is_cluster[sample_id]) { calcs_skipped_is_cluster += 1; continue; } if (use_triangle_inequality && (min_distances_cluster_new_cluster >= cluster_distances[sample_id])) { /* triangle inequality d(closest_cluster, new_cluster) >= 2 * d(sample, closest_cluster) * --> d(sample, new_cluster) >= d(sample, closest_cluster) */ calcs_skipped_tr += 1; continue; } if (blockvectors_mtrx->sample_count > 0) { dist = euclid_vector(blockvectors_mtrx->keys + blockvectors_mtrx->pointers[cluster_id] , blockvectors_mtrx->values + blockvectors_mtrx->pointers[cluster_id] , blockvectors_mtrx->pointers[cluster_id + 1] - blockvectors_mtrx->pointers[cluster_id] , blockvectors_mtrx->keys + blockvectors_mtrx->pointers[sample_id] , blockvectors_mtrx->values + blockvectors_mtrx->pointers[sample_id] , blockvectors_mtrx->pointers[sample_id + 1] - blockvectors_mtrx->pointers[sample_id] , sparse_vector_lengths[cluster_id] , sparse_vector_lengths[sample_id]); if (dist >= cluster_distances[sample_id]) { calcs_skipped_bv += 1; continue; } } if (pca_projection_samples != NULL) { dist = euclid_vector(pca_projection_samples[sample_id].keys , pca_projection_samples[sample_id].values , pca_projection_samples[sample_id].nnz , pca_projection_samples[cluster_id].keys , pca_projection_samples[cluster_id].values , pca_projection_samples[cluster_id].nnz , pca_sparse_vector_lengths[sample_id] , pca_sparse_vector_lengths[cluster_id]); if (dist >= 
cluster_distances[sample_id]) { calcs_skipped_pca += 1; continue; } } dist = euclid_vector(mtrx->keys + mtrx->pointers[cluster_id] , mtrx->values + mtrx->pointers[cluster_id] , mtrx->pointers[cluster_id + 1] - mtrx->pointers[cluster_id] , mtrx->keys + mtrx->pointers[sample_id] , mtrx->values + mtrx->pointers[sample_id] , mtrx->pointers[sample_id + 1] - mtrx->pointers[sample_id] , sparse_vector_lengths[cluster_id] , sparse_vector_lengths[sample_id]); calcs_needed += 1; #pragma omp critical if (dist < cluster_distances[sample_id]) { if (no_clusters_so_far != 1) { cluster_counts[cluster_assignments[sample_id]] -= 1; } cluster_distances[sample_id] = dist; cluster_assignments[sample_id] = no_clusters_so_far - 1; cluster_counts[cluster_assignments[sample_id]] += 1; } } } if (no_clusters_so_far == no_clusters) break; sum = 0; #pragma omp parallel for reduction(+:sum) for (j = 0; j < mtrx->sample_count; j++) { sum += cluster_distances[j]; } /* find new cluster depending on the closest distances from every sample to the already chosen clusters */ sum = (rand_r(seed) / rand_max) * sum; for (j = 0; j < mtrx->sample_count; j++) { sum -= cluster_distances[j]; if (sum < 0) break; } if (j == mtrx->sample_count) { initial_cluster_samples[no_clusters_so_far] = rand_r(seed) % mtrx->sample_count; } else { initial_cluster_samples[no_clusters_so_far] = j; } is_cluster[initial_cluster_samples[no_clusters_so_far]] = 1; min_distances_cluster_new_cluster = cluster_distances[initial_cluster_samples[no_clusters_so_far]] / 2; no_clusters_so_far++; } if (verbose) LOG_INFO("kmeans++ finished with %" PRINTF_INT64_MODIFIER "u clusters", no_clusters_so_far); if (verbose) LOG_INFO("kmeans++ calcs_needed %" PRINTF_INT64_MODIFIER "u/%" PRINTF_INT64_MODIFIER "u" , calcs_needed, (mtrx->sample_count * no_clusters) - calcs_skipped_is_cluster); mtrx_annz = mtrx->pointers[mtrx->sample_count] / mtrx->sample_count; d_add_subint(&tr, "kmeans++", "block_vectors_enabled", blockvectors_mtrx->sample_count > 0); 
if (blockvectors_mtrx->sample_count > 0) { VALUE_TYPE block_vectors_annz = blockvectors_mtrx->pointers[blockvectors_mtrx->sample_count] / blockvectors_mtrx->sample_count; d_add_subint(&tr, "kmeans++", "block_vectors_dim", blockvectors_mtrx->dim); d_add_subint(&tr, "kmeans++", "block_vectors_annz", block_vectors_annz); d_add_subfloat(&tr, "kmeans++", "block_vectors_relative_annz", block_vectors_annz / mtrx_annz); approximated_full_distance_calcs_bv = (block_vectors_annz / mtrx_annz) * (calcs_needed + calcs_skipped_bv); /* Use (block_vectors_annz / mtrx_annz) * (calcs_needed + calcs_skipped_bv) * to translate the number of block_vector euclidean distance calculations * into full euclidean distance calculations. Then add the approximated * full calculations and the done full calculations to get the overall * approximated full calculations. */ } d_add_subint(&tr, "kmeans++", "pca_enabled", pca_projection_samples != NULL); if (pca_projection_samples != NULL) { VALUE_TYPE pca_annz; KEY_TYPE max_sample_key; uint64_t pca_dim = 0; pca_annz = 0; for (i = 0; i < mtrx->sample_count; i++) { pca_annz += pca_projection_samples[i].nnz; if (pca_projection_samples[i].nnz > 0) { max_sample_key = pca_projection_samples[i].keys[pca_projection_samples[i].nnz - 1]; if (max_sample_key > pca_dim) { pca_dim = max_sample_key; } } } pca_dim += 1; pca_annz = pca_annz / mtrx->sample_count; d_add_subint(&tr, "kmeans++", "pca_dim", pca_dim); d_add_subint(&tr, "kmeans++", "pca_annz", pca_annz); d_add_subfloat(&tr, "kmeans++", "pca_relative_annz", pca_annz / mtrx_annz); approximated_full_distance_calcs_pca = (pca_annz / mtrx_annz) * (calcs_needed + calcs_skipped_pca); } if (pca_projection_samples != NULL || blockvectors_mtrx->sample_count > 0) { d_add_subint(&tr, "kmeans++", "approximated_full_distance_calcs" , (approximated_full_distance_calcs_bv + approximated_full_distance_calcs_pca + calcs_needed)); } d_add_subint(&tr, "kmeans++", "calculations_needed", calcs_needed); d_add_subint(&tr, 
/* (continued) tail of the kmeans++ initialization: record the naive
 * calculation count for comparison and release the is_cluster flags. */
        "kmeans++", "calculations_needed_naive", (mtrx->sample_count * no_clusters) - calcs_skipped_is_cluster);

    free(is_cluster);
}

/**
 * Prepare the context for one k-means iteration.
 *
 * Snapshots the current cluster assignments into
 * previous_cluster_assignments (so post-processing can count changes)
 * and resets the per-iteration counters and timers.
 *
 * @param ctx shared k-means context (assignments, counters, timers)
 */
void pre_process_iteration(struct general_kmeans_context* ctx) {
    /* free old previous_cluster_assignments */
    free_null(ctx->previous_cluster_assignments);

    /* copy cluster_assignments before iteration to previous_cluster_assignments */
    /* NOTE(review): the calloc result is not checked; on allocation failure
     * the memcpy below would dereference NULL — confirm OOM policy. */
    ctx->previous_cluster_assignments = (uint64_t*) calloc(ctx->samples->sample_count, sizeof(uint64_t));
    memcpy(ctx->previous_cluster_assignments,
           ctx->cluster_assignments,
           ctx->samples->sample_count * sizeof(uint64_t) );

    /* reset all calculation counters */
    ctx->done_calculations = 0;
    ctx->no_changes = 0;

    /* start the per-iteration timers (clock ticks for the calc timer,
     * wall clock for the iteration/duration timers) */
    if (ctx->track_time) ctx->duration_all_calcs = clock();
    gettimeofday(&(ctx->tm_start_iteration), NULL);
    gettimeofday(&(ctx->durations), NULL);
}

/**
 * Mini-batch convergence test.
 *
 * Maintains an exponentially weighted average (EWA) of the per-sample
 * batch objective and counts how many consecutive batches failed to
 * improve on the best EWA seen so far.
 *
 * @param no_samples               total number of samples in the data set
 * @param samples_per_batch        number of samples in this batch
 * @param summed_batch_wcssd       sum of distances over this batch
 * @param max_not_improved_counter batches without improvement that trigger convergence
 * @param conv_ctx                 persistent EWA state between calls
 * @return 1 if converged, 0 otherwise
 */
uint32_t batch_convergence(uint64_t no_samples
                         , uint64_t samples_per_batch
                         , VALUE_TYPE summed_batch_wcssd
                         , uint32_t max_not_improved_counter
                         , struct convergence_context* conv_ctx) {
    VALUE_TYPE ewa_wcssd, ewa_wcssd_min, alpha;
    uint32_t not_improved_counter;

    /* normalize the summed objective to a per-sample value */
    summed_batch_wcssd /= samples_per_batch;

    /*
     * Compute an Exponentially Weighted Average of the batch wcssd
     */
    if (!conv_ctx->initialized) {
        /* first batch: seed the EWA with the observed value */
        ewa_wcssd = summed_batch_wcssd;
        ewa_wcssd_min = ewa_wcssd;
        not_improved_counter = 0;
        conv_ctx->initialized = 1;
    } else {
        ewa_wcssd = conv_ctx->ewa_wcssd;
        ewa_wcssd_min = conv_ctx->ewa_wcssd_min;
        not_improved_counter = conv_ctx->not_improved_counter;

        /* smoothing factor proportional to the batch fraction, capped at 1 */
        alpha = ((VALUE_TYPE) samples_per_batch) * 2.0 / (no_samples + 1);
        alpha = (alpha > 1.0) ?
                1.0 : alpha;
        /* standard EWA update */
        ewa_wcssd = ewa_wcssd * (1 - alpha) + summed_batch_wcssd * alpha;
    }

    /* track the best (lowest) EWA seen so far */
    if (ewa_wcssd < ewa_wcssd_min) {
        not_improved_counter = 0;
        ewa_wcssd_min = ewa_wcssd;
    } else {
        not_improved_counter += 1;
    }

    /* NOTE(review): this early return skips the conv_ctx writes below, so on
     * the converging batch the stored EWA is one batch stale; the caller then
     * reads conv_ctx->ewa_wcssd — confirm this is intended. */
    if (not_improved_counter >= max_not_improved_counter) return 1;

    /* persist the EWA state for the next batch */
    conv_ctx->ewa_wcssd = ewa_wcssd;
    conv_ctx->ewa_wcssd_min = ewa_wcssd_min;
    conv_ctx->not_improved_counter = not_improved_counter;
    return 0;
}

/**
 * Post-process one mini-batch iteration.
 *
 * Counts assignment changes and sums the objective over the samples of
 * this batch only (those flagged in chosen_sample_map), then delegates
 * the convergence decision to batch_convergence(). ctx->wcssd is first
 * used as the raw batch sum and afterwards replaced by the smoothed EWA.
 *
 * @param ctx                      shared k-means context
 * @param chosen_sample_map        per-sample flags marking batch membership
 * @param max_not_improved_counter convergence patience, see batch_convergence()
 * @param conv_ctx                 persistent EWA state
 */
void post_process_iteration_minibatch(struct general_kmeans_context* ctx
                                    , uint32_t* chosen_sample_map
                                    , uint32_t max_not_improved_counter
                                    , struct convergence_context* conv_ctx) {
    uint64_t sample_id, samples_in_this_batch;

    ctx->wcssd = 0;
    samples_in_this_batch = 0;
    for (sample_id = 0; sample_id < ctx->samples->sample_count; sample_id++) {
        if (chosen_sample_map[sample_id]) {
            if (ctx->cluster_assignments[sample_id] != ctx->previous_cluster_assignments[sample_id])
                ctx->no_changes += 1;

            ctx->wcssd += ctx->cluster_distances[sample_id];
            samples_in_this_batch += 1;
        }
    }

    ctx->total_no_calcs += ctx->done_calculations;

    /* NOTE(review): here the timer delta is in clock() ticks, while
     * post_process_iteration() below uses microseconds — confirm units. */
    if (ctx->track_time) ctx->duration_all_calcs = clock() - ctx->duration_all_calcs;

    ctx->converged = batch_convergence(ctx->samples->sample_count
                                     , samples_in_this_batch
                                     , ctx->wcssd
                                     , max_not_improved_counter
                                     , conv_ctx);

    /* report the smoothed EWA as this iteration's objective */
    ctx->wcssd = conv_ctx->ewa_wcssd;
    ctx->old_wcssd = ctx->wcssd;
}

/**
 * Post-process one full (non-mini-batch) iteration.
 *
 * Counts assignment changes over all samples, accumulates the
 * calculation counter and timer, then recomputes the objective and
 * checks the tolerance/no-change convergence criteria.
 *
 * @param ctx  shared k-means context
 * @param prms k-means parameters (tolerance)
 */
void post_process_iteration(struct general_kmeans_context* ctx, struct kmeans_params *prms) {
    uint64_t sample_id;

    for (sample_id = 0; sample_id < ctx->samples->sample_count; sample_id++) {
        if (ctx->cluster_assignments[sample_id] != ctx->previous_cluster_assignments[sample_id])
            ctx->no_changes += 1;
    }

    ctx->total_no_calcs += ctx->done_calculations;

    if (ctx->track_time)
        ctx->duration_all_calcs = (VALUE_TYPE) get_diff_in_microseconds(ctx->durations);

    /* calculate the objective.
       This is exact for kmeans/bv_kmeans */
    ctx->wcssd = sum_value_array(ctx->cluster_distances, ctx->samples->sample_count);

    /* converged when the objective stalls within tol or nothing moved */
    if (fabs(ctx->wcssd - ctx->old_wcssd) < prms->tol || ctx->no_changes == 0) {
        ctx->converged = 1;
    }
    ctx->old_wcssd = ctx->wcssd;
}

/**
 * Log and record per-iteration statistics: objective, assignment
 * changes, remaining non-empty clusters, distance-calculation counts,
 * timings, and the memory footprint of the uthash-based raw clusters.
 *
 * @param ctx       shared k-means context
 * @param prms      k-means parameters (verbosity, tracking dict)
 * @param iteration zero-based iteration index
 */
void print_iteration_summary(struct general_kmeans_context* ctx, struct kmeans_params *prms, uint32_t iteration) {
    size_t hash_overhead;
    size_t clusters_nnz;
    size_t clusters_memory_consumption;
    VALUE_TYPE relative_dense_memory_consumption;
    /* NOTE(review): j is a signed int compared against the uint64_t
     * ctx->no_clusters below — fine for realistic cluster counts, but a
     * size_t/uint64_t index would be cleaner. */
    int j;

    if (prms->verbose)
        LOG_INFO("Iteration %" PRINTF_INT32_MODIFIER "u wcssd %f change: %" PRINTF_INT64_MODIFIER "u clust: %" PRINTF_INT64_MODIFIER "u d:%" PRINTF_INT64_MODIFIER "u"
               , iteration
               , ctx->wcssd
               , ctx->no_changes
               , get_nnz_uint64_array(ctx->cluster_counts, ctx->no_clusters)
               , ctx->done_calculations);

    if (ctx->track_time && prms->verbose)
        LOG_INFO("Timings: all_calc=%" PRINTF_INT32_MODIFIER "d/up_clu=%" PRINTF_INT32_MODIFIER "d/overall_time=%.2f"
               , (int32_t) (ctx->duration_all_calcs / 1000)
               , (int32_t) (ctx->duration_update_clusters / 1000)
               , get_diff_in_microseconds(ctx->tm_start));

    /* sum the nonzeros and the uthash bookkeeping overhead of all
     * raw cluster hash maps */
    hash_overhead = 0;
    clusters_nnz = 0;
    for (j = 0; j < ctx->no_clusters; j++) {
        clusters_nnz += HASH_COUNT(ctx->clusters_raw[j]);
        hash_overhead += ((HASH_COUNT(ctx->clusters_raw[j])
                          * sizeof(struct keyvaluecount_hash))
                          + HASH_OVERHEAD(hh, ctx->clusters_raw[j]));
    }

    clusters_memory_consumption = hash_overhead
                                  + (clusters_nnz * (sizeof(KEY_TYPE) + sizeof(VALUE_TYPE)));

    /* this determines how much memory the clusters use up compared to the case when
     * the clusters would have been stored dense.
     */
    relative_dense_memory_consumption = ((VALUE_TYPE) 100 * clusters_memory_consumption
                                        / (ctx->samples->dim * ctx->no_clusters * sizeof(VALUE_TYPE)));

    /* record all per-iteration statistics in the tracking dictionary */
    d_add_flist(&(prms->tr), "iteration_wcssd", ctx->wcssd);
    d_add_ilist(&(prms->tr), "iteration_changes", ctx->no_changes);
    d_add_ilist(&(prms->tr), "iteration_remaining_clusters"
              , get_nnz_uint64_array(ctx->cluster_counts, ctx->no_clusters));
    d_add_ilist(&(prms->tr), "iteration_clusters_mem_consumption", clusters_memory_consumption);
    d_add_flist(&(prms->tr), "iteration_clusters_mem_consumption_relative_dense", relative_dense_memory_consumption);
    d_add_ilist(&(prms->tr), "iteration_clusters_nnz", clusters_nnz);
    d_add_ilist(&(prms->tr), "iteration_clusters_sparsity", (clusters_nnz * 100) / (ctx->samples->dim * ctx->no_clusters));
    d_add_ilist(&(prms->tr), "iteration_full_distance_calcs", ctx->done_calculations);
    d_add_flist(&(prms->tr), "iteration_durations_calcs", ((VALUE_TYPE) ctx->duration_all_calcs) / 1000.0);
    d_add_flist(&(prms->tr), "iteration_durations_update_clusters", ((VALUE_TYPE) ctx->duration_update_clusters) / 1000.0);
    d_add_flist(&(prms->tr), "iteration_durations", ((VALUE_TYPE) get_diff_in_microseconds(ctx->tm_start_iteration)));
    d_add_int(&(prms->tr), "no_iterations", iteration + 1);
}

/**
 * Build the kmeans_result returned to the caller.
 *
 * Converts the final cluster vectors into a CSR matrix, copies the
 * initialization parameters (assignments and initial cluster samples),
 * and — if prms->remove_empty is set — reassigns all samples once to
 * drop clusters that ended up empty.
 *
 * Ownership: the returned result and everything it points to must be
 * released with free_kmeans_result().
 *
 * @param prms k-means parameters (remove_empty, verbosity, tracking dict)
 * @param ctx  shared k-means context holding the final state
 * @return newly allocated result structure
 */
struct kmeans_result* create_kmeans_result(struct kmeans_params *prms
                                         , struct general_kmeans_context* ctx) {
    struct kmeans_result* res;
    uint64_t i;

    d_add_float(&(prms->tr), "duration_kmeans", (VALUE_TYPE) get_diff_in_microseconds(ctx->tm_start));

    res = (struct kmeans_result*) calloc(1, sizeof(struct kmeans_result));

    /* create result cluster structure with empty clusters */
    res->clusters = create_matrix_without_empty_elements(ctx->cluster_vectors, ctx->no_clusters, ctx->samples->dim, NULL);

    res->initprms = ((struct initialization_params*) calloc(1, sizeof(struct initialization_params)));
    res->initprms->len_assignments = ctx->samples->sample_count;
    res->initprms->assignments = (uint64_t*)
            calloc(res->initprms->len_assignments, sizeof(uint64_t));
    res->initprms->len_initial_cluster_samples = ctx->no_clusters;
    res->initprms->initial_cluster_samples = (uint64_t*)
            calloc(res->initprms->len_initial_cluster_samples, sizeof(uint64_t));
    for(i = 0; i < ctx->no_clusters; i++) {
        res->initprms->initial_cluster_samples[i] = ctx->initial_cluster_samples[i];
    }

    if (prms->remove_empty) {
        struct assign_result assign_res;
        uint64_t no_filled;
        uint64_t* map;       /* old cluster id -> compacted cluster id */

        no_filled = 0;
        map = NULL;

        if (prms->verbose) LOG_INFO("Assigning all samples to clusters to find empty ones");

        /* assign all samples */
        assign_res = assign(ctx->samples, res->clusters, &(prms->stop));

        /* build the compaction map: only clusters with at least one
         * assigned sample survive */
        map = (uint64_t*) calloc(ctx->no_clusters, sizeof(uint64_t));
        for (i = 0; i < ctx->no_clusters; i++) {
            if (assign_res.counts[i] != 0) {
                map[i] = no_filled;
                no_filled += 1;
            }
        }

        /* only rebuild the result if the assignment was not aborted */
        if (!(prms->stop)) {
            free_csr_matrix(res->clusters);
            free_null(res->clusters);

            /* rebuild the cluster matrix without the empty clusters */
            res->clusters = create_matrix_without_empty_elements(ctx->cluster_vectors, ctx->no_clusters, ctx->samples->dim, assign_res.counts);

            /* translate assignments into the compacted cluster ids */
            for (i = 0; i < res->initprms->len_assignments; i++) {
                res->initprms->assignments[i] = map[ctx->cluster_assignments[i]];
            }

            if (prms->verbose)
                LOG_INFO("Remaining clusters after deleting empty ones = %lu"
                       , res->clusters->sample_count);
        }

        d_add_float(&(prms->tr), "wcssd_kmeans_with_remove_empty"
                  , sum_value_array(assign_res.distances, assign_res.len_assignments));

        free_assign_result(&assign_res);
        free_null(map);

        d_add_float(&(prms->tr), "duration_kmeans_with_remove_empty"
                  , (VALUE_TYPE) get_diff_in_microseconds(ctx->tm_start));
    } else {
        /* keep the raw assignments when empty clusters are retained */
        for (i = 0; i < res->initprms->len_assignments; i++) {
            res->initprms->assignments[i] = ctx->cluster_assignments[i];
        }
    }

    d_add_int(&(prms->tr), "no_clusters_remaining", res->clusters->sample_count);

    return res;
}

/**
 * Initialize the shared k-means context for a run.
 *
 * Zeroes the context, records the run parameters in the tracking dict,
 * allocates all per-sample and per-cluster arrays, runs the chosen
 * initialization strategy, and computes the initial per-sample distances
 * and objective.
 *
 * @param prms    k-means parameters (may be adjusted: no_clusters is
 *                clamped to the sample count)
 * @param ctx     context to initialize (overwritten)
 * @param samples input samples as a CSR matrix (borrowed, not copied)
 */
void initialize_general_context(struct kmeans_params *prms
                              , struct general_kmeans_context* ctx
                              , struct csr_matrix* samples) {
    uint64_t i;
    VALUE_TYPE old_wcssd_;

    memset(ctx, 0, sizeof(struct
                          general_kmeans_context));

    if (prms->verbose) LOG_INFO("----------------");
    if (prms->verbose) LOG_INFO("%s", KMEANS_ALGORITHM_NAMES[prms->kmeans_algorithm_id]);
    if (prms->verbose) LOG_INFO("----------------");

    /* optional pre-initialization hook of the chosen init strategy */
    if (KMEANS_PREINIT_FUNCTIONS[prms->init_id]) {
        KMEANS_PREINIT_FUNCTIONS[prms->init_id](samples, prms);
    }

    /* record the run parameters in the tracking dictionary */
    d_add_subint(&(prms->tr), "general_params", "no_clusters", prms->no_clusters);
    d_add_substring(&(prms->tr), "general_params", "algorithm", (char*) KMEANS_ALGORITHM_NAMES[prms->kmeans_algorithm_id]);
    d_add_subint(&(prms->tr), "general_params", "seed", prms->seed);
    d_add_subint(&(prms->tr), "general_params", "remove_empty", prms->remove_empty);
    d_add_subint(&(prms->tr), "general_params", "iteration_limit", prms->iteration_limit);
    d_add_subfloat(&(prms->tr), "general_params", "tol", prms->tol);
    d_add_substring(&(prms->tr), "general_params", "init", (char*) KMEANS_INIT_NAMES[prms->init_id]);
    d_add_subint(&(prms->tr), "general_params", "no_cores_used", omp_get_max_threads());

    ctx->samples = samples;
    gettimeofday(&(ctx->tm_start), NULL);

    /* enables time tracking of specific parts of the source code */
    ctx->track_time = 1;

    /* calculate ||s|| for every s in samples */
    calculate_matrix_vector_lengths(ctx->samples, &ctx->vector_lengths_samples);

    /* create clusters data structures */
    /* cannot have more clusters than samples */
    if (prms->no_clusters > ctx->samples->sample_count) {
        prms->no_clusters = ctx->samples->sample_count;
    }

    ctx->clusters_raw = (struct keyvaluecount_hash**) calloc(prms->no_clusters, sizeof(struct keyvaluecount_hash*));
    ctx->cluster_vectors = (struct sparse_vector*) calloc(prms->no_clusters, sizeof(struct sparse_vector));
    ctx->no_clusters = prms->no_clusters;
    ctx->cluster_counts = (uint64_t*) calloc(prms->no_clusters, sizeof(uint64_t));
    ctx->cluster_assignments = (uint64_t*) calloc(ctx->samples->sample_count, sizeof(uint64_t));
    ctx->initial_cluster_samples = (uint64_t*) calloc(prms->no_clusters, sizeof(uint64_t));
    ctx->cluster_distances = (VALUE_TYPE*) calloc(ctx->samples->sample_count,
                                                  sizeof(VALUE_TYPE));
    ctx->was_assigned = (uint32_t*) calloc(ctx->samples->sample_count, sizeof(uint32_t));
    ctx->previous_cluster_assignments = NULL;

    gettimeofday(&(ctx->durations), NULL);

    /* do initialization */
    KMEANS_INIT_FUNCTIONS[prms->init_id](ctx, prms);

    d_add_float(&(prms->tr), "duration_init", (VALUE_TYPE) get_diff_in_microseconds(ctx->durations));

    /* calculate the distance from the samples to their initial clusters */
    calculate_initial_distances_clusters(ctx->samples
                                       , ctx->cluster_vectors
                                       , ctx->no_clusters
                                       , ctx->cluster_assignments
                                       , ctx->vector_lengths_samples
                                       , ctx->cluster_distances);

    /* every cluster is assumed to not have changed in the beginning */
    /* NOTE(review): the loop below is redundant — calloc already
     * zero-initializes the array. Kept for clarity/explicitness. */
    ctx->clusters_not_changed = (uint32_t*) calloc(prms->no_clusters, sizeof(uint32_t));
    for (i = 0; i < prms->no_clusters; i++) {
        ctx->clusters_not_changed[i] = 0;
    }

    /* calculate the initial wcssd after initialization */
    old_wcssd_ = 0;

    #pragma omp parallel for reduction(+:old_wcssd_)
    for (i = 0; i < ctx->samples->sample_count; i++) {
        old_wcssd_ += ctx->cluster_distances[i];
    }
    ctx->old_wcssd = old_wcssd_;

    calculate_vector_list_lengths(ctx->cluster_vectors, ctx->no_clusters, &(ctx->vector_lengths_clusters));

    if (prms->verbose)
        LOG_INFO("old_wcssd %f, input_samples = %" PRINTF_INT64_MODIFIER "u, input_dimension = %" PRINTF_INT64_MODIFIER "u, input_average_nnz = %" PRINTF_INT64_MODIFIER "u, overall_time_before_first_iteration %.2f"
               , ctx->old_wcssd
               , ctx->samples->sample_count
               , ctx->samples->dim, ctx->samples->pointers[ctx->samples->sample_count] / ctx->samples->sample_count
               , get_diff_in_microseconds(ctx->tm_start));
    /* this output is needed to verify that all algorithms start at the same starting position */

    /* indicates if kmeans has converged */
    ctx->converged = 0;

    d_add_float(&(prms->tr), "initial_wcssd", ctx->old_wcssd);
    d_add_int(&(prms->tr), "input_samples", ctx->samples->sample_count);
    d_add_int(&(prms->tr), "input_dimension", ctx->samples->dim);
    d_add_int(&(prms->tr), "input_annz",
              ctx->samples->pointers[ctx->samples->sample_count] / ctx->samples->sample_count);
}

/**
 * Build the block-vector approximation matrix for the samples.
 *
 * Delegates to determine_block_vectors_for_matrix() and records the
 * resulting block-vector dimensions and average nnz in the tracking dict.
 *
 * @param prms                 k-means parameters (tracking dict, verbosity)
 * @param samples              input samples (CSR)
 * @param desired_annz         target average nnz of the block vectors
 * @param block_vectors_samples output block-vector matrix
 * @param block_vectors_dim    output block-vector dimensionality
 */
void search_samples_block_vectors(struct kmeans_params *prms
                                , struct csr_matrix* samples
                                , VALUE_TYPE desired_annz
                                , struct csr_matrix* block_vectors_samples
                                , uint64_t *block_vectors_dim) {

    determine_block_vectors_for_matrix(samples
                                     , desired_annz
                                     , block_vectors_samples
                                     , block_vectors_dim);

    d_add_subint(&(prms->tr), "block_vector_data", "dim", block_vectors_samples->dim);
    d_add_subint(&(prms->tr), "block_vector_data", "annz",
                 (uint64_t) (block_vectors_samples->pointers[block_vectors_samples->sample_count] / block_vectors_samples->sample_count));
    d_add_subint(&(prms->tr), "block_vector_data", "annz_samples",
                 (uint64_t) (samples->pointers[samples->sample_count] / samples->sample_count));

    if (prms->verbose)
        LOG_INFO("n_blockvector_mtrx = %" PRINTF_INT64_MODIFIER "u, average_nnz_blockvektor_mtrx = %" PRINTF_INT64_MODIFIER "u"
               , block_vectors_samples->dim
               , block_vectors_samples->pointers[block_vectors_samples->sample_count] / block_vectors_samples->sample_count);
}

/**
 * Replace the current cluster vectors/lengths with the shifted ones
 * produced by calculate_shifted_clusters_general(), freeing the old
 * data. Clusters whose key pointer is shared (unchanged clusters reuse
 * the old arrays) are not freed to avoid a double free.
 *
 * @param ctx shared k-means context
 */
void switch_to_shifted_clusters(struct general_kmeans_context* ctx) {
    /* free old clusters as they are replaced with the shifted ones */
    uint64_t i;
    for (i = 0; i < ctx->no_clusters; i++) {
        /* pointer equality marks an unchanged cluster that aliases the
         * old arrays — skip it */
        if (ctx->cluster_vectors[i].keys != ctx->shifted_cluster_vectors[i].keys) {
            free_null(ctx->cluster_vectors[i].keys);
            free_null(ctx->cluster_vectors[i].values);
            ctx->cluster_vectors[i].nnz = ctx->shifted_cluster_vectors[i].nnz;
            ctx->cluster_vectors[i].keys = ctx->shifted_cluster_vectors[i].keys;
            ctx->cluster_vectors[i].values = ctx->shifted_cluster_vectors[i].values;
        }
    }
    free_null(ctx->shifted_cluster_vectors);
    free_null(ctx->vector_lengths_clusters);
    ctx->vector_lengths_clusters = ctx->vector_lengths_shifted_clusters;
}

/**
 * Recompute the cluster centers ("shifted clusters") after an iteration.
 *
 * Updates the per-cluster uthash accumulators from the samples whose
 * assignment changed (UPDATE_TYPE_KMEANS) or which are in the active
 * batch (UPDATE_TYPE_MINIBATCH_KMEANS), then materializes them as
 * sparse vectors in ctx->shifted_cluster_vectors and refreshes the
 * corresponding vector lengths for the changed clusters only.
 *
 * @param ctx               shared k-means context
 * @param active_sample_map batch membership flags (mini-batch only, may be NULL)
 * @param update_type       UPDATE_TYPE_KMEANS or UPDATE_TYPE_MINIBATCH_KMEANS
 */
void calculate_shifted_clusters_general(struct general_kmeans_context* ctx
                                      , uint32_t* active_sample_map
                                      , uint32_t update_type) {
    uint64_t* was_cluster_hashmap_changed;
    uint64_t j;

    if (ctx->track_time)
gettimeofday(&(ctx->durations), NULL); was_cluster_hashmap_changed = (uint64_t*) calloc(ctx->no_clusters, sizeof(uint64_t)); #pragma omp parallel for schedule(dynamic, 1000) for (j = 0; j < ctx->no_clusters; j++) { ctx->clusters_not_changed[j] = 1; } /* update cluster_centers if needed */ for (j = 0; j < ctx->samples->sample_count; j++) { if (update_type == UPDATE_TYPE_KMEANS) { if ((ctx->previous_cluster_assignments[j] != ctx->cluster_assignments[j]) || !ctx->was_assigned[j]) { KEY_TYPE* keys; VALUE_TYPE* values; uint64_t nnz; keys = ctx->samples->keys + ctx->samples->pointers[j]; values = ctx->samples->values + ctx->samples->pointers[j]; nnz = ctx->samples->pointers[j + 1] - ctx->samples->pointers[j]; if (ctx->was_assigned[j]) { remove_sample_from_hashmap(ctx->clusters_raw, keys, values, nnz, ctx->previous_cluster_assignments[j]); ctx->cluster_counts[ctx->previous_cluster_assignments[j]] -= 1; ctx->clusters_not_changed[ctx->previous_cluster_assignments[j]] = 0; } was_cluster_hashmap_changed[ctx->cluster_assignments[j]] += add_sample_to_hashmap(ctx->clusters_raw, keys, values, nnz, ctx->cluster_assignments[j]); ctx->cluster_counts[ctx->cluster_assignments[j]] += 1; ctx->clusters_not_changed[ctx->cluster_assignments[j]] = 0; ctx->was_assigned[j] = 1; } } else if (update_type == UPDATE_TYPE_MINIBATCH_KMEANS) { if (active_sample_map[j]) { KEY_TYPE* keys; VALUE_TYPE* values; uint64_t nnz; keys = ctx->samples->keys + ctx->samples->pointers[j]; values = ctx->samples->values + ctx->samples->pointers[j]; nnz = ctx->samples->pointers[j + 1] - ctx->samples->pointers[j]; was_cluster_hashmap_changed[ctx->cluster_assignments[j]] += add_sample_to_hashmap_minibatch_kmeans(ctx->clusters_raw , keys , values , nnz , ctx->cluster_assignments[j] , ctx->cluster_counts[ctx->cluster_assignments[j]]); ctx->cluster_counts[ctx->cluster_assignments[j]] += 1; ctx->clusters_not_changed[ctx->cluster_assignments[j]] = 0; ctx->was_assigned[j] = 1; } } } ctx->shifted_cluster_vectors = (struct 
sparse_vector*) calloc(ctx->no_clusters, sizeof(struct csr_matrix)); for (j = 0; j < ctx->no_clusters; j++) { if (was_cluster_hashmap_changed[j]) { HASH_SORT((ctx->clusters_raw)[j], id_sort); } if (ctx->clusters_not_changed[j]) { /* cluster was not changed! use old cluster as shifted */ ctx->shifted_cluster_vectors[j].nnz = ctx->cluster_vectors[j].nnz; ctx->shifted_cluster_vectors[j].keys = ctx->cluster_vectors[j].keys; ctx->shifted_cluster_vectors[j].values = ctx->cluster_vectors[j].values; } else { /* cluster has changed! adapt it */ uint64_t local_feature_count; struct keyvaluecount_hash *current_item, *tmp; current_item = NULL; tmp = NULL; ctx->shifted_cluster_vectors[j].nnz = HASH_COUNT(ctx->clusters_raw[j]); /* printf("adapt cluster %u %u", j, ctx->shifted_cluster_vectors[j].nnz); */ if (ctx->shifted_cluster_vectors[j].nnz > 0) { ctx->shifted_cluster_vectors[j].keys = (KEY_TYPE*) calloc(ctx->shifted_cluster_vectors[j].nnz, sizeof(KEY_TYPE)); ctx->shifted_cluster_vectors[j].values = (VALUE_TYPE*) calloc(ctx->shifted_cluster_vectors[j].nnz, sizeof(VALUE_TYPE)); } local_feature_count = 0; HASH_ITER(hh, ctx->clusters_raw[j], current_item, tmp) { ctx->shifted_cluster_vectors[j].keys[local_feature_count] = current_item->id; if (update_type == UPDATE_TYPE_MINIBATCH_KMEANS) { ctx->shifted_cluster_vectors[j].values[local_feature_count] = current_item->val; } else { ctx->shifted_cluster_vectors[j].values[local_feature_count] = current_item->val / ctx->cluster_counts[j]; } local_feature_count += 1; } } } /* only recalculate for clusters which have actually changed */ ctx->vector_lengths_shifted_clusters = (VALUE_TYPE*) calloc(ctx->no_clusters, sizeof(VALUE_TYPE)); memcpy(ctx->vector_lengths_shifted_clusters, ctx->vector_lengths_clusters, ctx->no_clusters * sizeof(VALUE_TYPE)); update_vector_list_lengths(ctx->shifted_cluster_vectors , ctx->no_clusters , ctx->clusters_not_changed , ctx->vector_lengths_shifted_clusters); free_null(was_cluster_hashmap_changed); if 
       (ctx->track_time)
        ctx->duration_update_clusters = (VALUE_TYPE) get_diff_in_microseconds(ctx->durations);
}

/**
 * Full k-means variant of the shifted-cluster update.
 *
 * @param ctx shared k-means context
 */
void calculate_shifted_clusters(struct general_kmeans_context* ctx) {
    calculate_shifted_clusters_general(ctx, NULL, UPDATE_TYPE_KMEANS);
}

/**
 * Mini-batch variant of the shifted-cluster update.
 *
 * @param ctx               shared k-means context
 * @param active_sample_map flags marking the samples of the current batch
 */
void calculate_shifted_clusters_minibatch_kmeans(struct general_kmeans_context* ctx
                                               , uint32_t* active_sample_map) {
    calculate_shifted_clusters_general(ctx
                                     , active_sample_map
                                     , UPDATE_TYPE_MINIBATCH_KMEANS);
}

/**
 * Group a list of cluster centers into *no_groups groups by running
 * bv_kmeans on the centers themselves, then assigning every center to
 * its group.
 *
 * On return *no_groups holds the number of non-trivial groups actually
 * produced and *groups a newly allocated array (caller frees) where
 * each group lists the indices of its member clusters.
 *
 * @param clusters_list input cluster centers as sparse vectors
 * @param no_clusters   number of input centers
 * @param dim           dimensionality of the centers
 * @param groups        output array of groups (allocated here)
 * @param no_groups     in: desired group count; out: actual group count
 */
void create_kmeans_cluster_groups(struct sparse_vector *clusters_list
                                , uint64_t no_clusters
                                , uint64_t dim
                                , struct group** groups
                                , uint64_t* no_groups) {
    uint64_t i;
    struct kmeans_result* res;
    uint64_t* cluster_counters;   /* next free slot per group */
    uint32_t stop;
    struct csr_matrix clusters;
    struct assign_result assign_res;
    struct kmeans_params prms;

    /* fixed internal parameters for the (short) meta clustering run */
    prms.kmeans_algorithm_id = ALGORITHM_KMEANS;
    prms.no_clusters = *no_groups;
    prms.seed = 1;
    prms.iteration_limit = 5;
    prms.verbose = 0;
    prms.init_id = KMEANS_INIT_KMPP;
    prms.tol = 1e-6;
    prms.remove_empty = 0;
    prms.stop = 0;
    prms.tr = NULL;
    stop = 0;

    sparse_vector_list_to_csr_matrix(clusters_list
                                   , no_clusters
                                   , dim
                                   , &clusters);

    /* cluster the cluster centers into no_group groups */
    res = bv_kmeans(&clusters, &prms);
    *no_groups = res->clusters->sample_count;

    /* assign clusters to groups */
    assign_res = assign(&clusters, res->clusters, &stop);

    *groups = (struct group*) calloc(*no_groups, sizeof(struct group));
    cluster_counters = (uint64_t*) calloc(*no_groups, sizeof(uint64_t));

    /* size each group from the assignment counts */
    for (i = 0; i < *no_groups; i++) {
        (*groups)[i].no_clusters = assign_res.counts[i];
        (*groups)[i].clusters = (uint64_t*) calloc((*groups)[i].no_clusters
                                                 , sizeof(uint64_t));
    }

    /* fill each group with the indices of its member clusters */
    for (i = 0; i < clusters.sample_count; i++) {
        uint64_t group;
        group = assign_res.assignments[i];
        (*groups)[group].clusters[cluster_counters[group]] = i;
        cluster_counters[group]++;
    }

    free_cdict(&(prms.tr));
    free(cluster_counters);
    free_kmeans_result(res);
    free_assign_result(&assign_res);
    free_csr_matrix(&clusters);
}

void
calculate_initial_distances_clusters(struct csr_matrix *samples
                                   , struct sparse_vector* clusters
                                   , uint64_t no_clusters
                                   , uint64_t* cluster_assignments
                                   , VALUE_TYPE* vector_lengths_samples
                                   , VALUE_TYPE* cluster_distances) {
    /* Computes, for every sample, the euclidean distance to its assigned
     * cluster and stores it in cluster_distances. */
    VALUE_TYPE* vector_lengths_clusters;
    uint64_t sample_id;

    /* precompute ||c|| for every cluster */
    calculate_vector_list_lengths(clusters, no_clusters, &vector_lengths_clusters);

    #pragma omp parallel for schedule(dynamic, 1000)
    for (sample_id = 0; sample_id < samples->sample_count; sample_id++) {
        cluster_distances[sample_id] = euclid_vector_list(samples, sample_id
                                                        , clusters, cluster_assignments[sample_id]
                                                        , vector_lengths_samples
                                                        , vector_lengths_clusters);
    }

    free(vector_lengths_clusters);
}

/**
 * Compute how far every cluster center moved between two iterations.
 *
 * For each changed cluster the euclidean distance between its old and
 * new center is stored; unchanged clusters (per clusters_not_changed)
 * get a zero displacement.
 *
 * @param distance_clustersold_to_clustersnew output displacement per cluster
 * @param new_clusters                        centers after the update
 * @param old_clusters                        centers before the update
 * @param no_clusters                         number of clusters
 * @param vector_length_new_clusters          precomputed ||c_new||
 * @param vector_length_old_clusters          precomputed ||c_old||
 * @param clusters_not_changed                1 = cluster did not move
 */
void calculate_distance_clustersold_to_clustersnew(VALUE_TYPE* distance_clustersold_to_clustersnew
                                                 , struct sparse_vector* new_clusters
                                                 , struct sparse_vector* old_clusters
                                                 , uint64_t no_clusters
                                                 , VALUE_TYPE* vector_length_new_clusters
                                                 , VALUE_TYPE* vector_length_old_clusters
                                                 , uint32_t* clusters_not_changed) {
    uint64_t cluster_id;

    #pragma omp parallel for schedule(dynamic, 1000)
    for (cluster_id = 0; cluster_id < no_clusters; cluster_id++) {
        if (clusters_not_changed[cluster_id] == 0) {
            distance_clustersold_to_clustersnew[cluster_id]
                = euclid_vector(new_clusters[cluster_id].keys
                              , new_clusters[cluster_id].values
                              , new_clusters[cluster_id].nnz
                              , old_clusters[cluster_id].keys
                              , old_clusters[cluster_id].values
                              , old_clusters[cluster_id].nnz
                              , vector_length_new_clusters[cluster_id]
                              , vector_length_old_clusters[cluster_id]);
        } else {
            /* unchanged cluster: displacement is exactly zero */
            distance_clustersold_to_clustersnew[cluster_id] = 0;
        }
    }
}
calculate_discontinuous_distance_to_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pooyan Dadvand // Ruben Zorrilla // // Collaborators: Franziska Wahl // #if !defined(KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "geometries/plane_3d.h" #include "includes/checks.h" #include "processes/process.h" #include "processes/find_intersected_geometrical_objects_process.h" #include "utilities/variable_utils.h" #include "utilities/pointer_communicator.h" namespace Kratos { ///@addtogroup Kratos Core ///@{ ///@name Kratos Classes ///@{ class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcessFlags { public: KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES); /// Local flag to switch on/off the elemental edge distances storage KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES_EXTRAPOLATED); /// Local flag to switch on/off the extrapolated elemental edge distances storage KRATOS_DEFINE_LOCAL_FLAG(USE_POSITIVE_EPSILON_FOR_ZERO_VALUES); /// Local flag to switch from positive (true) to negative (false) epsilon when replacing zero distance values. }; /// This only calculates the distance. Calculating the inside outside should be done by a derived class of this. /** This process takes a volume model part (with tetrahedra mesh) and a skin model part (with triangle mesh) and and calcualtes the distance to the skin for all the elements and nodes of the volume model part. 
*/ template<std::size_t TDim = 3> class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of CalculateDiscontinuousDistanceToSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateDiscontinuousDistanceToSkinProcess); ///@} ///@name Life Cycle ///@{ /// Constructor to be used. CalculateDiscontinuousDistanceToSkinProcess( ModelPart& rVolumePart, ModelPart& rSkinPart); /// Constructor with option CalculateDiscontinuousDistanceToSkinProcess( ModelPart& rVolumePart, ModelPart& rSkinPart, const Flags rOptions); /// Destructor. ~CalculateDiscontinuousDistanceToSkinProcess() override; ///@} ///@name Deleted ///@{ /// Default constructor. CalculateDiscontinuousDistanceToSkinProcess() = delete; /// Copy constructor. CalculateDiscontinuousDistanceToSkinProcess(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; /// Assignment operator. CalculateDiscontinuousDistanceToSkinProcess& operator=(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; FindIntersectedGeometricalObjectsProcess mFindIntersectedObjectsProcess; ///@} ///@name Operations ///@{ /** * @brief Initializes discontinuous distance computation process * This method initializes the TO_SPLIT flag, the DISTANCE and * ELEMENTAL_DISTANCES variables as well as the EMBEDDED_VELOCITY */ virtual void Initialize(); /** * @brief Calls the FindIntersectedObjectsProcess to find the intersections * This method calls the FindIntersectedObjectsProcess FindIntersections method. 
*/ virtual void FindIntersections(); /** * @brief Get the array containing the intersecting objects * This method returns an array containing pointers to the intersecting geometries * @return std::vector<PointerVector<GeometricalObject>>& */ virtual std::vector<PointerVector<GeometricalObject>>& GetIntersections(); /** * @brief Computes the elemental distance values * Given an intersecting objects vector, this method computes the elemental distance field * @param rIntersectedObjects array containing pointers to the intersecting geometries */ virtual void CalculateDistances(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects); /** * @brief Calls the FindIntersectedObjects Clear() method * This method calls the FindIntersectedObjects Clear() to empty the intersecting objects geometries array */ void Clear() override; /** * @brief Executes the CalculateDiscontinuousDistanceToSkinProcess * This method automatically does all the calls required to compute the discontinuous distance function. */ void Execute() override; /** * @brief Calculate embedded variable from skin double specialization * This method calls the specialization method for two double variables * @param rVariable origin double variable in the skin mesh * @param rEmbeddedVariable elemental double variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<double> &rVariable, const Variable<double> &rEmbeddedVariable); /** * @brief Calculate embedded variable from skin array specialization * This method calls the specialization method for two double variables * @param rVariable origin array variable in the skin mesh * @param rEmbeddedVariable elemental array variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<array_1d<double,3>> &rVariable, const Variable<array_1d<double,3>> &rEmbeddedVariable); ///@} ///@name Access ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. 
std::string Info() const override; /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override; /// Print object's data. void PrintData(std::ostream& rOStream) const override; ///@} protected: ///@name Protected Operations ///@{ /** * @brief Set the Intersection Plane object * This method returns the plane that defines the element intersection. The 2D * case is considered to be a simplification of the 3D one, so a "fake" extra * point is created by extruding the first point in the z-direction. * @param rIntPtsVector array containing the intersecting points coordinates * @return Plane3D the plane defined by the given intersecting points coordinates */ Plane3D SetIntersectionPlane(const std::vector<array_1d<double,3>> &rIntPtsVector); /** * @brief Calculates the domain characteristic length * This method computes the domain characteristic length as the norm of * the diagonal vector that joins the maximum and minimum coordinates * @return double the calculated characteristic length */ double CalculateCharacteristicLength(); ///@} private: ///@name Member Variables ///@{ ModelPart& mrSkinPart; ModelPart& mrVolumePart; Flags mOptions; static const std::size_t mNumNodes = TDim + 1; static const std::size_t mNumEdges = (TDim == 2) ? 
3 : 6; const double mZeroToleranceMultiplier = 1e3; bool mDetectedZeroDistanceValues = false; bool mAreNeighboursComputed = false; ///@} ///@name Private Operations ///@{ /** * @brief Computes the discontinuous distance in one element * This method computes the discontinuous distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the discontinuous edge-based distance in one element * This method computes the discontinuous edge-based distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalAndEdgeDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the edges intersections in one element * Provided a list of elemental intersecting geometries, this * method computes the edge intersections for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the average intersection points of the extrapolated geometry * @param rIntersectionPointsArray array containing the edges intersection points * @return unsigned int number of cut edges */ unsigned int ComputeEdgesIntersections( Element& rElement1, const PointerVector<GeometricalObject>& 
rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, array_1d<double,mNumEdges> &rCutEdgesRatioVector, array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector, std::vector<array_1d <double,3> > &rIntersectionPointsArray); /** * @brief Computes the intersection of a single edge * This method computes the intersection of a given edge with the candidate * intersecting geometry. This operation is performed accordingly to the working * space dimension using the intersection utilities implemented in intersection_utilities.h * @param rIntObjGeometry candidate intersecting geometry * @param rEdgePoint1 edge origin point * @param rEdgePoint2 edge end point * @param rIntersectionPoint intersection point * @return int type of intersection id (see intersection_utilities.h) */ int ComputeEdgeIntersection( const Element::GeometryType& rIntObjGeometry, const Element::NodeType& rEdgePoint1, const Element::NodeType& rEdgePoint2, Point& rIntersectionPoint); /** * @brief Checks if rIntersectionPoint is already present in the * intersection point list in rIntersectionPointsVector for the tolerance rTolerance. * @param rIntersectionPoint reference to the intersection point * @param rIntersectionPointsVector reference to the list of already computed intersected points * @param rEdgeTolerance tolerance to compare two points and assess if they are equal * @return bool if rIntersectionPoint is present in rIntersectionPointsVector */ bool CheckIfPointIsRepeated( const array_1d<double,3>& rIntersectionPoint, const std::vector<array_1d<double,3>>& rIntersectionPointsVector, const double& rEdgeTolerance); /** * @brief Computes the element intersection unit normal * This method computes the element intersection unit normal vector using the distance function gradient. 
* @param rGeometry reference to the geometry of the element of interest * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values * @param rNormal obtained unit normal vector */ void ComputeIntersectionNormal( const Element::GeometryType& rGeometry, const Vector& rElementalDistances, array_1d<double,3> &rNormal); /** * @brief Computes the nodal distances to the intersection plane * This methods creates a plane from the intersection points and then calculates the nodal distances * to the intersection plane. * In presence of multiple intersections, it performs a least squares approximation of the intersection plane. * @param rElement Element to calculate the ELEMENTAL_DISTANCES * @param rIntersectedObjects Intersected objects container * @param rIntersectionPointsCoordinates The edges intersection points coordinates */ void ComputeIntersectionPlaneElementalDistances( Element& rElement, const PointerVector<GeometricalObject>& rIntersectedObjects, const std::vector<array_1d<double,3>>& rIntersectionPointsCoordinates); /** * @brief Computes the intersection plane approximation * For complex intersection patterns, this method takes a list containing * all the intersecting points and computes the plane that minimizes the * distance from all these points in a least squares sense. The approximated * plane is defined in terms of an origin point and its normal vector. * @param rElement1 reference to the element of interest * @param rPointsCoord list containing the coordinates of al the intersecting points * @param rPlaneBasePointCoords base point defining the approximated plane * @param rPlaneNormal normal vector defining the approximated plane */ void ComputePlaneApproximation( const Element& rElement1, const std::vector< array_1d<double,3> >& rPointsCoord, array_1d<double,3>& rPlaneBasePointCoords, array_1d<double,3>& rPlaneNormal); /** * @brief Computes the elemental distances from the approximation * plane defined by the set of points in rPointVector. 
* @param rElement reference to the element of interest * @param rElementalDistances reference to the elemental distances container containing the coordinates of al the intersecting points * @param rPoitnVector reference to the vector containing the poits to define the approximation plane */ void ComputeElementalDistancesFromPlaneApproximation( Element& rElement, Vector& rElementalDistances, const std::vector<array_1d<double,3>>& rPointVector); /** * @brief Checks and replaces the values of the ELEMENTAL_DISTANCES vector that are * zero. The values are replaced by an epsilon (whose sign depends on a flag) * that is a fixed factor from the double precision. Can be deactivated by a flag. * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void ReplaceZeroDistances(Vector& rElementalDistances); /** * @brief Checks (and corrects if needed) the intersection normal orientation * This method checks the orientation of the previously computed intersection normal. * To do that, the normal vector to each one of the intersecting geometries is * computed and its directo is compared against the current one. If the negative * votes win, the current normal vector orientation is switched. * @param rGeometry element of interest geometry * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void CorrectDistanceOrientation( const Element::GeometryType& rGeometry, const PointerVector<GeometricalObject>& rIntersectedObjects, Vector& rElementalDistances); /** * @brief Computes the normal vector to an intersecting object geometry * This method computes the normal vector to an intersecting object geometry. 
* @param rGeometry reference to the geometry of the intersecting object * @param rIntObjNormal reference to the intersecting object normal vector */ void inline ComputeIntersectionNormalFromGeometry( const Element::GeometryType &rGeometry, array_1d<double,3> &rIntObjNormal); /** * @brief Checks if element is incised and then computes the uncut edges intersections of the element * with an averaged and extrapolated geometry. Therefore it calls 'ComputeExtrapolatedGeometryIntersections'. * Note: for uncut or completely cut elements no ratios of the extrapolated geometry will be calculated. * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element (by the non-extrapolated geometry) * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal array as normal vector of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedEdgesIntersectionsIfIncised( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int &rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3> &rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Computes the uncut edges intersections of one element with an averaged and extrapolated geometry. * Therefore it calls 'IntersectionUtilities'. * It saves the edge intersections as ratios of the edge's length in rCutExtraEdgesRatioVector. 
* @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal normal of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedGeometryIntersections( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int& rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3>& rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Converts edge ratios and edge ratios of the extrapolated geometry to elemental (node) distances * @param rElement reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * (ELEMENTAL_EDGE_DISTANCES) * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry (ELEMENTAL_EXTRA_EDGE_DISTANCES) */ void ComputeElementalDistancesFromEdgeRatios( Element& rElement, const PointerVector<GeometricalObject>& rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rCutEdgesRatioVector, const array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector); /** * @brief Computes the intersection points from the intersection ratios of 
the edges of the element of interest * @param rGeometry reference to geometry of the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rEdgeRatiosVector array containing the intersection ratios of an element's edges * @param rIntersectionPointsVector vector containing the intersection point arrays */ void ConvertRatiosToIntersectionPoints( const Element::GeometryType& rGeometry, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rEdgeRatiosVector, std::vector<array_1d <double,3> > &rIntersectionPointsVector); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rEdge reference to the edge of interest * @param rIntersectionPoint average intersection point at the edge * @return calculated relative positions of the intersection point along the edge from node zero */ double ConvertIntersectionPointToEdgeRatio( const Geometry<Node<3> >& rEdge, const array_1d<double,3>& rIntersectionPoint); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rEdge reference to the edge of interest * @param rEdgeRatio relative positions of the intersection point along the edge from node zero * @return rIntersectionPoint calculated average intersection point at the edge */ array_1d<double,3> ConvertEdgeRatioToIntersectionPoint( const Geometry<Node<3> >& rEdge, const double& rEdgeRatio); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @return boolean true if cut edges share one node */ bool CheckIfCutEdgesShareNode( const Element& rElement, const 
Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges>& rCutEdgesRatioVector) const; /** * @brief Computes the value of any embedded variable * For a given array variable in the skin mesh, this method calculates the value * of such variable in the embedded mesh. This is done in each element of the volume * mesh by computing the average value of all the edges intersections. This value * is averaged again according to the number of intersected edges. * @tparam TVarType variable type * @param rVariable origin variable in the skin mesh * @param rEmbeddedVariable elemental variable in the volume mesh to be computed */ template<class TVarType> void CalculateEmbeddedVariableFromSkinSpecialization( const Variable<TVarType> &rVariable, const Variable<TVarType> &rEmbeddedVariable) { const auto &r_int_obj_vect= this->GetIntersections(); const int n_elems = mrVolumePart.NumberOfElements(); KRATOS_ERROR_IF((mrSkinPart.NodesBegin())->SolutionStepsDataHas(rVariable) == false) << "Skin model part solution step data missing variable: " << rVariable << std::endl; // Initialize embedded variable value VariableUtils().SetNonHistoricalVariableToZero(rEmbeddedVariable, mrVolumePart.Elements()); // Compute the embedded variable value for each element #pragma omp parallel for schedule(dynamic) for (int i_elem = 0; i_elem < n_elems; ++i_elem) { // Check if the current element has intersecting entities if (r_int_obj_vect[i_elem].size() != 0) { // Initialize the element values unsigned int n_int_edges = 0; auto it_elem = mrVolumePart.ElementsBegin() + i_elem; auto &r_geom = it_elem->GetGeometry(); const auto edges = r_geom.GenerateEdges(); // Loop the element of interest edges for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) { // Initialize edge values unsigned int n_int_obj = 0; TVarType i_edge_val = rEmbeddedVariable.Zero(); // Check the edge intersection against all the candidates for (auto &r_int_obj : r_int_obj_vect[i_elem]) { 
Point intersection_point; const int is_intersected = this->ComputeEdgeIntersection( r_int_obj.GetGeometry(), edges[i_edge][0], edges[i_edge][1], intersection_point); // Compute the variable value in the intersection point if (is_intersected == 1) { n_int_obj++; array_1d<double,3> local_coords; r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point); Vector int_obj_N; r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords); for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) { i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(rVariable) * int_obj_N[i_node]; } } } // Check if the edge is intersected if (n_int_obj != 0) { // Update the element intersected edges counter n_int_edges++; // Add the average edge value (there might exist cases in where // more than one geometry intersects the edge of interest). it_elem->GetValue(rEmbeddedVariable) += i_edge_val / n_int_obj; } } // Average between all the intersected edges if (n_int_edges != 0) { it_elem->GetValue(rEmbeddedVariable) /= n_int_edges; } } } }; /** * @brief Set the TO_SPLIT Kratos flag * This function sets the TO_SPLIT flag in the provided element according to the ELEMENTAL_DISTANCES values * Note that the zero distance case is avoided by checking the positiveness and negativeness of the nodal values * @param rElement Element to set the TO_SPLIT flag * @param ZeroTolerance Tolerance to check the zero distance values */ void SetToSplitFlag( Element& rElement, const double ZeroTolerance); /** * @brief Checks the elemental edges distances if zero values of the distance * are detected. This ensures that the elementes detected as incised and intersected * are consistent with the zero-correction applied by the process. */ void CheckAndCorrectEdgeDistances(); /** * @brief Creates the global pointer communicator that contains all neighbours elements. 
In MPI, this * allows to get information from neighbours elements that are not in the same partition. */ GlobalPointerCommunicator<Element>::Pointer CreatePointerCommunicator(); ///@} }; // Class CalculateDiscontinuousDistanceToSkinProcess ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> ( std::istream& rIStream, CalculateDiscontinuousDistanceToSkinProcess<>& rThis); /// output stream function inline std::ostream& operator << ( std::ostream& rOStream, const CalculateDiscontinuousDistanceToSkinProcess<>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED defined
Example_tasking.2.c
/*
 * @@name:       tasking.2c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 * @@version:    omp_3.0
 */

/* A binary-tree node; payload omitted in this example. */
struct node {
  struct node *left;
  struct node *right;
};

/* Visitor supplied by the caller; applied to each node post-order. */
extern void process(struct node *);

/*
 * Traverse the tree rooted at p in post-order, spawning an OpenMP task
 * for each child subtree.  The pointer p is firstprivate by default in
 * each task, so every task captures its own subtree root.
 */
void postorder_traverse(struct node *p)
{
  if (p->left) {
    #pragma omp task  /* p is firstprivate by default */
    postorder_traverse(p->left);
  }
  if (p->right) {
    #pragma omp task  /* p is firstprivate by default */
    postorder_traverse(p->right);
  }
  /* Both child traversals must finish before this node is visited. */
  #pragma omp taskwait
  process(p);
}
omp_for.c
#include <stdio.h>
#include <omp.h>

/*
 * Sums the integers 1..10 in parallel and prints the result (55).
 * Each thread accumulates a private partial sum over its share of the
 * loop iterations, then folds it into the shared total inside a
 * critical section.
 */
int main(int argc, char** argv){
  int partial_Sum, total_Sum;

  /* Initialize the shared total ONCE, before the parallel region.
   * The original code had every thread execute `total_Sum = 0;` inside
   * the region with no synchronization — a data race: a late-starting
   * thread could zero the total after another thread had already added
   * its partial sum in the critical section, yielding a wrong answer. */
  total_Sum = 0;

  #pragma omp parallel private(partial_Sum) shared(total_Sum)
  {
    partial_Sum = 0;  /* per-thread accumulator */

    /* Distribute the iterations 1..10 across the team. */
    #pragma omp for
    for(int i = 1; i <= 10; i++){
      partial_Sum += i;
    }

    /* Thread-safe region: add each thread's partial sum to the total. */
    #pragma omp critical
    {
      total_Sum += partial_Sum;
    }
  }

  printf("Total Sum: %d\n", total_Sum);  /* expect 55 */
  return 0;
}
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. % % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5)); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); 
continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from 
edges. We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double normalize, **kernel; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, sharp, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) sharp_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5)); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait sharp_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); sharp_traits=GetPixelChannelTraits(sharp_image,channel); if ((traits == UndefinedPixelTrait) || (sharp_traits == UndefinedPixelTrait)) continue; if ((sharp_traits & CopyPixelTrait) != 0) { 
SetPixelChannel(sharp_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((sharp_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(sharp_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. 
We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l a t e r a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilateralBlurImage() is a non-linear, 
edge-preserving, and noise-reducing % smoothing filter for images. It replaces the intensity of each pixel with % a weighted average of intensity values from nearby pixels. This weight is % based on a Gaussian distribution. The weights depend not only on Euclidean % distance of pixels, but also on the radiometric differences (e.g., range % differences, such as color intensity, depth distance, etc.). This preserves % sharp edges. % % The format of the BilateralBlurImage method is: % % Image *BilateralBlurImage(const Image *image,const size_t width, % const size_t height,const double intensity_sigma, % const double spatial_sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the neighborhood in pixels. % % o height: the height of the neighborhood in pixels. % % o intensity_sigma: sigma in the intensity space. A larger value means % that farther colors within the pixel neighborhood (see spatial_sigma) % will be mixed together, resulting in larger areas of semi-equal color. % % o spatial_sigma: sigma in the coordinate space. A larger value means that % farther pixels influence each other as long as their colors are close % enough (see intensity_sigma ). When the neigborhood diameter is greater % than zero, it specifies the neighborhood size regardless of % spatial_sigma. Otherwise, the neigborhood diameter is proportional to % spatial_sigma. % % o exception: return any errors or warnings in this structure. 
% */ static inline double BlurDistance(const ssize_t x,const ssize_t y, const ssize_t u,const ssize_t v) { return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v))); } static inline double BlurGaussian(const double x,const double sigma) { return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))* PerceptibleReciprocal(Magick2PI*sigma*sigma)); } static double **DestroyBilateralThreadSet(const ssize_t number_threads, double **weights) { register ssize_t i; assert(weights != (double **) NULL); for (i=0; i <= (ssize_t) number_threads; i++) if (weights[i] != (double *) NULL) weights[i]=(double *) RelinquishMagickMemory(weights[i]); weights=(double **) RelinquishMagickMemory(weights); return(weights); } static double **AcquireBilateralThreadSet(const size_t number_threads, const size_t width,const size_t height) { double **weights; register ssize_t i; weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights)); if (weights == (double **) NULL) return((double **) NULL); (void) memset(weights,0,number_threads*sizeof(*weights)); for (i=0; i <= (ssize_t) number_threads; i++) { weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights)); if (weights[i] == (double *) NULL) return(DestroyBilateralThreadSet(number_threads,weights)); } return(weights); } MagickExport Image *BilateralBlurImage(const Image *image,const size_t width, const size_t height,const double intensity_sigma,const double spatial_sigma, ExceptionInfo *exception) { #define MaxIntensity (255) #define BilateralBlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double intensity_gaussian[2*(MaxIntensity+1)], *spatial_gaussian, **weights; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo mid; register ssize_t u; ssize_t n, number_threads, v; ssize_t i, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) 
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } number_threads=(size_t) GetMagickResourceLimit(ThreadResource); weights=AcquireBilateralThreadSet(number_threads,width,height); if (weights == (double **) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=(-MaxIntensity); i < MaxIntensity; i++) intensity_gaussian[i+MaxIntensity]=BlurGaussian((double) i,intensity_sigma); spatial_gaussian=weights[number_threads]; n=0; mid.x=(ssize_t) (width/2L); mid.y=(ssize_t) (height/2L); for (v=0; v < (ssize_t) height; v++) for (u=0; u < (ssize_t) width; u++) spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y), spatial_sigma); /* Bilateral blur image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; register const Quantum *magick_restrict p, *magick_restrict r; register ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. 
*/ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height, exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)* mid.x; n=0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double alpha, beta; r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]*alpha*beta; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); } q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BilateralBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); weights=DestroyBilateralThreadSet(number_threads,weights); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  {
    Image
      *accelerated_image;

    /*
      Prefer the OpenCL-accelerated convolution when one is available.
    */
    accelerated_image=AccelerateConvolveImage(image,kernel_info,exception);
    if (accelerated_image != (Image *) NULL)
      return(accelerated_image);
  }
#endif
  /*
    Convolution is morphology's convolve method with an iteration count of 1.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while perserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull(): one step of Crimmins' complementary hulling.  f and g are
  (columns+2) x (rows+2) single-channel buffers with a 1-pixel border;
  (x_offset,y_offset) selects the neighbor direction.  With polarity > 0 a
  pixel is raised by 1 (scaled) when the neighbor is at least 2 (scaled)
  above it; with polarity <= 0 it is lowered symmetrically.  The first pass
  writes g from f; the second pass writes f back from g using both the
  neighbor (r) and its mirror (s).
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row; r points at the offset neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* (2*y+1)+y*columns == index of row y, column 1 in the padded buffer. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second pass: compare against both the neighbor and its mirror. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

/*
  DespeckleImage(): despeckle one channel at a time by copying the channel
  into a bordered scratch buffer and applying the eight complementary Hull
  passes in each of the four principal directions.
*/
MagickExport
Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* Neighbor directions: down, right, down-right, down-left. */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  The +2 in each dimension is a 1-pixel border so
    Hull() can address every neighbor without bounds checks.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* Copy channel i into the bordered scratch buffer. */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      For each direction: raise dark speckles (polarity 1) both ways, then
      lower bright speckles (polarity -1) both ways.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Write the despeckled channel back. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    u;

  size_t
    width;

  /*
    Check arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a square Laplacian-of-ones kernel: every tap is -1 except the
    center, which balances the kernel to a zero sum.
  */
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (u=0; u < (ssize_t) (kernel_info->width*kernel_info->height); u++)
    kernel_info->values[u]=(-1.0);
  /* u == width*height here, so u/2 indexes the center tap. */
  kernel_info->values[u/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build the emboss kernel: a Gaussian whose sign flips across the
    upper-left/lower-right diagonal.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  j=(ssize_t) (kernel_info->width-1)/2;
  /* k tracks the anti-diagonal column: only taps with u == k are kept. */
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /* Positive Gaussian weight below/right of center, negative above/left. */
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ?
        -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize the kernel so its taps sum to 1. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Convolve with a full 2-D Gaussian kernel built from the geometry string.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma(): Rec. 709 luma of an unpacked double-precision pixel.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Side of each square quadrant window. */
  width=(size_t) radius+1;
  /* Pre-blur the source; statistics are gathered on the blurred copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      /*
        Examine the four window quadrants around (x,y) and keep the one
        with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        /* Quadrants: 0=NW, 1=NE, 2=SW, 3=SE relative to (x,y). */
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about the mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means a pixel fetch failed above. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* Write the interpolated center of the winning quadrant. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanline, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanline_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanline)); if (scanline_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanline=(float *) GetVirtualMemoryBlob(scanline_info); /* Create intermediate buffer. 
*/
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      /*
        Allocation failed: release everything acquired so far before
        throwing (scanline buffer, both cache views, the cloned image).
      */
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /*
    NOTE(review): the blur loops below accumulate 2*width-1 taps (weight
    ramps up over the first width taps, then down), while totalWeight is
    (width+1)^2; this matches the historical implementation — confirm the
    normalization before changing either side.
  */
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: for each column, gather luma (with width rows of virtual
    padding above and below) into a per-thread scanline, apply the
    triangularly-weighted running sum, and store the result into the
    intermediate float image interImage (which carries width columns of
    horizontal padding for the horizontal pass that follows).
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Each OpenMP thread owns one scanLineSize slice of the scratch blob. */
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Read the column with `width` virtual rows of padding on each end. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+y;
        /* Ascending half of the triangular weight ramp. */
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        /* Descending half of the ramp. */
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding: replicate edge results into the left/right
           padding columns of interImage so the horizontal pass can read
           past the image edges without special-casing. */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
*/ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanline; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; PixelTrait traits; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(contrast_image,ClampToQuantum((MagickRealType) GetPixelRed(image,p)*mult),q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType) GetPixelGreen(image,p)*mult),q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType) GetPixelBlue(image,p)*mult),q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanline_info=RelinquishVirtualMemory(scanline_info); 
interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
*/
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /*
    One-sided Gaussian taps g(i)=exp(-i^2/(2*sigma^2))/(sqrt(2*pi)*sigma),
    normalized so the taps sum to 1.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the per-tap pixel offsets along the motion direction.  point is a
    vector of length `width` along `angle`, so hypot(point.x,point.y) equals
    width and the divisions below reduce each tap to i*(cos,sin) rounded to
    the nearest integer (ceil(v-0.5) is round-half-down).
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=CastDoubleToLong(ceil((double) (i*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[i].y=CastDoubleToLong(ceil((double) (i*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); 
blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  in pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImage method is:
%
%      Image *PreviewImage(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  /* Parameters that are swept across the 9 tiles (advanced per iteration). */
  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
        sigma);
      break;
    }
    case ThresholdPreview:
    {
      /*
        Bilevel-threshold preview.  The operation must be applied to the
        cloned preview_image, not to thumbnail: the clone is taken before
        the operation, so thresholding the thumbnail leaves the displayed
        tile unmodified and mutates the shared thumbnail instead.
      */
      preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
      if (preview_image == (Image *) NULL)
        break;
      (void) BilevelImage(preview_image,(double) (percentage*((double)
        QuantumRange+1.0))/100.0,exception);
      (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
        (double) (percentage*((double) QuantumRange+1.0))/100.0);
      break;
    }
    case EdgeDetectPreview:
    {
      preview_image=EdgeImage(thumbnail,radius,exception);
      (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
      break;
    }
    case SpreadPreview:
    {
      preview_image=SpreadImage(thumbnail,image->interpolate,radius,
        exception);
      /*
        NOTE(review): the label reports radius+0.5 while SpreadImage() is
        called with radius; retained as-is to preserve existing labels.
      */
      (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
        radius+0.5);
      break;
    }
    case SolarizePreview:
    {
      preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
      if (preview_image == (Image *) NULL)
        break;
      (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
        100.0,exception);
      (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
        (QuantumRange*percentage)/100.0);
      break;
    }
    case ShadePreview:
    {
      /* Light-source azimuth/elevation advance 10 degrees per tile. */
      degrees+=10.0;
      preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
        exception);
      (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
        degrees);
      break;
    }
    case RaisePreview:
    {
      RectangleInfo
        raise;

      preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
      if (preview_image == (Image *) NULL)
        break;
      /* Bevel size grows with the tile index i. */
      raise.width=(size_t) (2*i+2);
      raise.height=(size_t) (2*i+2);
      raise.x=(i-1)/2;
      raise.y=(i-1)/2;
      (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
      (void) FormatLocaleString(label,MagickPathExtent,
        "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
        raise.height,(double) raise.x,(double) raise.y);
      break;
    }
    case SegmentPreview:
    {
      preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
      if (preview_image == (Image *) NULL)
        break;
      threshold+=0.4f;
      (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) 
RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; preview_image->alpha_trait=UndefinedPixelTrait; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
*/ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. 
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The blur rotates about the image center; blur_radius is the distance
    from the center to a corner (the farthest any pixel can be).
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    n is the number of angular samples, scaled with both the blur angle and
    the image size so larger blurs get more samples; theta is the angular
    step between consecutive samples.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      /* Partial allocation: free whichever table succeeded, then throw. */
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the rotation tables, centered so the samples span
    [-angle/2,+angle/2] around each source pixel's polar angle.
  */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) 
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
% % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); 
return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; 
channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) 
status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SelectiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); luminance_view=DestroyCacheView(luminance_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
/* Surface "height" of a pixel: its clamped intensity. */
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* linear_image is the read-only source; shade_image receives the result. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical (azimuth, elevation) angles,
    scaled to the quantum range.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Read a 3-row strip (rows y-1..y+1) padded by one column on each side. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        point at pixel x in rows y-1, y, and y+1 of the strip; the normal's
        x and y components are 3x3 intensity differences (a Sobel-like
        gradient).
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the vertical component only */
      else
        {
          /* shade = (normal . light)/|normal|, clamped at 0 for back-faces. */
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* Gray shading: write the raw shade value for every channel. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* Otherwise modulate the source channel by the shade factor. */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2D Laplacian-of-Gaussian style kernel: negative Gaussian weights
    everywhere, with the center weight chosen so the kernel sharpens.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* Overwrite the center tap (i == width*width here, so i/2 is the middle
     element for the odd kernel width) to flip the kernel's overall sign. */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Renormalize so the kernel sums to one. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method:  intepolation method.
%
%    o radius:  choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* Per-thread RNG states so rows can be generated in parallel. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when the RNG is unseeded (key == ~0UL), so a fixed
     seed still yields reproducible output. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* Sample the source at a random offset within +/- width/2 pixels. */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%    Image *UnsharpMaskImage(const Image *image,const double radius,
%      const double sigma,const double amount,const double threshold,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the diffence gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
/*
  This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  /* Start from a Gaussian-blurred copy; the sharpening below adds back the
     gained difference between the source and this blur. */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* pixel = source - blurred; below threshold keep the source,
           otherwise add the gained difference back in. */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
apply_bcs_sommerfeld.h
// Boundary condition driver routine: Apply BCs to all
// boundary faces of the 3D numerical domain, filling in the
// outer boundary ghost zone layers, starting with the innermost
// layer and working outward.
#include "sommerfeld_params.h"
#include <string.h>

// Apply Sommerfeld (radiation) outer boundary conditions, then inner
// (parity) boundary conditions, to all NUM_GFS evolved gridfunctions.
//
// For each outer ghost-zone point the RHS is overwritten with the
// advection form  rhs = -(c/r)*(x*df/dx + y*df/dy + z*df/dz + (f - f_inf)),
// using one-sided (upwinded) second-order stencils on faces and centered
// stencils otherwise.  If radpower > 0 for a gridfunction, an extrapolated
// h'(t)/r^n correction is added, estimated one point interior to the
// ghost zone.  Inner ghost zones are filled by parity-copying from their
// source points.  Only Cartesian coordinates are supported.
//
// Parameters:
//   params     - grid parameters (Nxx_plus_2NGHOSTS*, invdx*, via
//                set_Cparameters.h)
//   xx[3]      - coordinate arrays for each dimension
//   bcstruct   - precomputed outer/inner ghost-zone point lists
//   NUM_GFS    - number of gridfunctions
//   gfs_parity - parity type per gridfunction (indexes the parity array)
//   gfs        - gridfunction data (read; inner ghost zones written)
//   rhs_gfs    - RHS data (outer ghost-zone entries overwritten)
//
// NOTE(review): `coord`, `evolgf_at_inf`, `evolgf_radpower`, and
// `evolgf_speed` are presumably declared in sommerfeld_params.h — confirm.
void apply_bcs_sommerfeld(const paramstruct *restrict params,REAL *restrict xx[3],
                          const bc_struct *restrict bcstruct, const int NUM_GFS,
                          const int8_t *restrict gfs_parity, REAL *restrict gfs,
                          REAL *restrict rhs_gfs) {

  if (strcmp(coord, "Cartesian") == 0){
#pragma omp parallel for
    for(int which_gf=0;which_gf<NUM_GFS;which_gf++) {
      // Per-gridfunction Sommerfeld parameters.
      REAL var_at_infinity = evolgf_at_inf[which_gf];
      REAL radpower = evolgf_radpower[which_gf];
      REAL char_speed = evolgf_speed[which_gf];
#include "RELATIVE_PATH__set_Cparameters.h" /* Header file containing correct #include for set_Cparameters.h;
                                             * accounting for the relative path */

      // Work outward, one ghost-zone layer at a time.
      for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
        for(int pt=0;pt<bcstruct->num_ob_gz_pts[which_gz];pt++) {

          // Destination ghost-zone point and the face(s) it lies on.
          int i0 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i0;
          int i1 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i1;
          int i2 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i2;

          int8_t FACEX0 = bcstruct->outer[which_gz][pt].FACEi0;
          int8_t FACEX1 = bcstruct->outer[which_gz][pt].FACEi1;
          int8_t FACEX2 = bcstruct->outer[which_gz][pt].FACEi2;

          REAL xx0 = xx[0][i0];
          REAL xx1 = xx[1][i1];
          REAL xx2 = xx[2][i2];

          // First-derivative estimates, upwinded toward the interior on
          // faces and centered elsewhere.
          REAL dfdx = 0.;
          REAL dfdy = 0.;
          REAL dfdz = 0.;

          // On a +x or -x face, do up/down winding as appropriate:
          if(abs(FACEX0)==1 || i0+NGHOSTS >= Nxx_plus_2NGHOSTS0 || i0-NGHOSTS <= 0) {
            int8_t FACE0PARITY = FACEX0;
            if(i0+NGHOSTS >= Nxx_plus_2NGHOSTS0) FACE0PARITY = -1;
            if(i0-NGHOSTS <= 0)                  FACE0PARITY = +1;
            // Second-order one-sided stencil.
            dfdx = FACE0PARITY*(-3*gfs[IDX4S(which_gf,i0              ,i1,i2)]
                                +4*gfs[IDX4S(which_gf,i0+1*FACE0PARITY,i1,i2)]
                                -1*gfs[IDX4S(which_gf,i0+2*FACE0PARITY,i1,i2)])*invdx0*0.5;

          // Not on a +x or -x face, using centered difference:
          } else {
            dfdx = (gfs[IDX4S(which_gf,i0+1,i1,i2)]-gfs[IDX4S(which_gf,i0-1,i1,i2)])*invdx0*0.5;
          }

          // On a +y or -y face, do up/down winding as appropriate:
          if(abs(FACEX1)==1 || i1+NGHOSTS >= Nxx_plus_2NGHOSTS1 || i1-NGHOSTS <= 0) {
            int8_t FACE1PARITY = FACEX1;
            if(i1+NGHOSTS >= Nxx_plus_2NGHOSTS1) FACE1PARITY = -1;
            if(i1-NGHOSTS <= 0)                  FACE1PARITY = +1;
            dfdy = FACE1PARITY*(-3*gfs[IDX4S(which_gf,i0,i1              ,i2)]
                                +4*gfs[IDX4S(which_gf,i0,i1+1*FACE1PARITY,i2)]
                                -1*gfs[IDX4S(which_gf,i0,i1+2*FACE1PARITY,i2)])*invdx1*0.5;

          // Not on a +y or -y face, using centered difference:
          } else {
            dfdy = (gfs[IDX4S(which_gf,i0,i1+1,i2)]-gfs[IDX4S(which_gf,i0,i1-1,i2)])*invdx1*0.5;
          }

          // On a +z or -z face, do up/down winding as appropriate:
          if(abs(FACEX2)==1 || i2+NGHOSTS >= Nxx_plus_2NGHOSTS2 || i2-NGHOSTS <= 0) {
            int8_t FACE2PARITY = FACEX2;
            if(i2+NGHOSTS >= Nxx_plus_2NGHOSTS2) FACE2PARITY = -1;
            if(i2-NGHOSTS <= 0)                  FACE2PARITY = +1;
            dfdz = FACE2PARITY*(-3*gfs[IDX4S(which_gf,i0,i1,i2              )]
                                +4*gfs[IDX4S(which_gf,i0,i1,i2+1*FACE2PARITY)]
                                -1*gfs[IDX4S(which_gf,i0,i1,i2+2*FACE2PARITY)])*invdx2*0.5;

          // Not on a +z or -z face, using centered difference:
          } else {
            dfdz = (gfs[IDX4S(which_gf,i0,i1,i2+1)]-gfs[IDX4S(which_gf,i0,i1,i2-1)])*invdx2*0.5;
          }

          // Sommerfeld RHS: -(c/r)*(x_i df/dx_i + (f - f_inf)).
          REAL invr = 1./sqrt(xx0*xx0 + xx1*xx1 + xx2*xx2);

          REAL source_rhs = -invr*char_speed*(xx0*dfdx + xx1*dfdy + xx2*dfdz
                                              + gfs[IDX4S(which_gf,i0,i1,i2)]
                                              - var_at_infinity);
          rhs_gfs[IDX4S(which_gf,i0,i1,i2)] = source_rhs;

          /************* For radial falloff and the extrapolated h'(t) term *************/
          if (radpower > 0) {

            // Move one point away from gz point to compare pure advection to df/dt|interior
            int ip0 = i0+FACEX0;
            int ip1 = i1+FACEX1;
            int ip2 = i2+FACEX2;

            // NOTE: these shadow the outer xx0/xx1/xx2/dfd* on purpose; the
            // same stencil logic below is evaluated at the interior point.
            REAL xx0 = xx[0][ip0];
            REAL xx1 = xx[1][ip1];
            REAL xx2 = xx[2][ip2];

            REAL dfdx = 0.;
            REAL dfdy = 0.;
            REAL dfdz = 0.;

            // On a +x or -x face, do up/down winding as appropriate:
            if(abs(FACEX0)==1 || ip0+NGHOSTS >= Nxx_plus_2NGHOSTS0 || ip0-NGHOSTS <= 0) {
              int8_t FACE0PARITY = FACEX0;
              if(ip0+NGHOSTS >= Nxx_plus_2NGHOSTS0) FACE0PARITY = -1;
              if(ip0-NGHOSTS <= 0)                  FACE0PARITY = +1;
              dfdx = FACE0PARITY*(-3*gfs[IDX4S(which_gf,ip0              ,ip1,ip2)]
                                  +4*gfs[IDX4S(which_gf,ip0+1*FACE0PARITY,ip1,ip2)]
                                  -1*gfs[IDX4S(which_gf,ip0+2*FACE0PARITY,ip1,ip2)])*invdx0*0.5;

            // Not on a +x or -x face, using centered difference:
            } else {
              dfdx = (gfs[IDX4S(which_gf,ip0+1,ip1,ip2)]-gfs[IDX4S(which_gf,ip0-1,ip1,ip2)])*invdx0*0.5;
            }

            // On a +y or -y face, do up/down winding as appropriate:
            if(abs(FACEX1)==1 || ip1+NGHOSTS >= Nxx_plus_2NGHOSTS1 || ip1-NGHOSTS <= 0) {
              int8_t FACE1PARITY = FACEX1;
              if(ip1+NGHOSTS >= Nxx_plus_2NGHOSTS1) FACE1PARITY = -1;
              if(ip1-NGHOSTS <= 0)                  FACE1PARITY = +1;
              dfdy = FACE1PARITY*(-3*gfs[IDX4S(which_gf,ip0,ip1              ,ip2)]
                                  +4*gfs[IDX4S(which_gf,ip0,ip1+1*FACE1PARITY,ip2)]
                                  -1*gfs[IDX4S(which_gf,ip0,ip1+2*FACE1PARITY,ip2)])*invdx1*0.5;

            // Not on a +y or -y face, using centered difference:
            } else {
              dfdy = (gfs[IDX4S(which_gf,ip0,ip1+1,ip2)]-gfs[IDX4S(which_gf,ip0,ip1-1,ip2)])*invdx1*0.5;
            }

            // On a +z or -z face, do up/down winding as appropriate:
            if(abs(FACEX2)==1 || ip2+NGHOSTS >= Nxx_plus_2NGHOSTS2 || ip2-NGHOSTS <= 0) {
              int8_t FACE2PARITY = FACEX2;
              if(ip2+NGHOSTS >= Nxx_plus_2NGHOSTS2) FACE2PARITY = -1;
              if(ip2-NGHOSTS <= 0)                  FACE2PARITY = +1;
              dfdz = FACE2PARITY*(-3*gfs[IDX4S(which_gf,ip0,ip1,ip2              )]
                                  +4*gfs[IDX4S(which_gf,ip0,ip1,ip2+1*FACE2PARITY)]
                                  -1*gfs[IDX4S(which_gf,ip0,ip1,ip2+2*FACE2PARITY)])*invdx2*0.5;

            // Not on a +z or -z face, using centered difference:
            } else {
              dfdz = (gfs[IDX4S(which_gf,ip0,ip1,ip2+1)]-gfs[IDX4S(which_gf,ip0,ip1,ip2-1)])*invdx2*0.5;
            }

            REAL rp = sqrt(xx0*xx0 + xx1*xx1 + xx2*xx2);
            REAL invrp = 1./rp;

            // Pure advection
            REAL extrap_rhs = invrp*char_speed*(xx0*dfdx + xx1*dfdy + xx2*dfdz
                                                + gfs[IDX4S(which_gf,ip0,ip1,ip2)]
                                                - var_at_infinity);

            // Take difference between pure advection and df/dt|interior
            REAL aux = rhs_gfs[IDX4S(which_gf,ip0,ip1,ip2)] + extrap_rhs;

            // Solve for h'(t)/(r_gz)^n term; invr is 1/r at the ghost-zone
            // point, so (rp*invr)^radpower = (r_interior/r_gz)^n.
            rhs_gfs[IDX4S(which_gf,i0,i1,i2)] += aux*pow(rp*invr,radpower);

          }
        }// END for(int pt=0;pt<num_ob_gz_pts[which_gz];pt++)

        // Then apply INNER (parity) boundary conditions:
        for(int pt=0;pt<bcstruct->num_ib_gz_pts[which_gz];pt++) {
          const int i0dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i0;
          const int i1dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i1;
          const int i2dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i2;
          const int i0src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i0;
          const int i1src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i1;
          const int i2src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i2;
          const int8_t *prty= bcstruct->inner[which_gz][pt].parity;
          //                printf("%d\n",bcstruct->inner_bc_parity[which_gz][pt].parity[gfs_parity[which_gf]]);
          // Copy from the source point, flipping sign per the gridfunction's
          // parity type.
          gfs[IDX4S(which_gf,i0dest,i1dest,i2dest)] =
            bcstruct->inner[which_gz][pt].parity[gfs_parity[which_gf]] * gfs[IDX4S(which_gf, i0src,i1src,i2src)];
        }// END for(int pt=0;pt<num_ib_gz_pts[which_gz];pt++)
      } // END for(int which_gz = 0; which_gz < NGHOSTS; which_gz++)
    } // END for(int which_gf=0;which_gf<NUM_GFS;which_gf++)
  }// END if coord = Cartesian
  /*    else {
#pragma omp parallel for
    for(int which_gf=0;which_gf<NUM_GFS;which_gf++) {
      REAL var_at_infinity = evolgf_at_inf[which_gf];
      REAL radpower = evolgf_radpower[which_gf];
      REAL char_speed = evolgf_speed[which_gf];

      for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
        for(int pt=0;pt<bcstruct->num_ob_gz_pts[which_gz];pt++) {

          int i0 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i0;
          int i1 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i1;
          int i2 = bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i2;

          //            int8_t FACEX0 = bcstruct->outer[which_gz][pt].FACEi0;
          //            int8_t FACEX1 = bcstruct->outer[which_gz][pt].FACEi1;
          //            int8_t FACEX2 = bcstruct->outer[which_gz][pt].FACEi2;

          REAL invr = 1./(xx[0][i0]);

          REAL dfdr = 0.;

          // On a +x or -x face, do up/down winding as appropriate:
          dfdr = (-3*gfs[IDX4S(which_gf,i0,  i1,i2)]
                  +4*gfs[IDX4S(which_gf,i0+1,i1,i2)]
                  -1*gfs[IDX4S(which_gf,i0+2,i1,i2)])*invdx0*0.5;

          REAL source_rhs = -char_speed*(dfdr + invr*(gfs[IDX4S(which_gf,i0,i1,i2)] - var_at_infinity));
          rhs_gfs[IDX4S(which_gf,i0,i1,i2)] = source_rhs;

          /////////For radial falloff and the extrapolated h'(t) term////////
          if (radpower > 0) {

            int ip0 = i0+1;

            REAL invrp = 1./(xx[0][ip0]);

            REAL dfdr = 0.;

            dfdr = (-3*gfs[IDX4S(which_gf,ip0  ,i1,i2)]
                    +4*gfs[IDX4S(which_gf,ip0+1,i1,i2)]
                    -1*gfs[IDX4S(which_gf,ip0+2,i1,i2)])*invdx0*0.5;

            REAL extrap_rhs = char_speed*(dfdr + invrp*(gfs[IDX4S(which_gf,ip0,i1,i2)] - var_at_infinity));

            REAL aux = rhs_gfs[IDX4S(which_gf,ip0,i1,i2)] + extrap_rhs;

            rhs_gfs[IDX4S(which_gf,i0,i1,i2)] += aux*pow(xx[0][ip0]*invr,radpower);

          }
        }// END for(int pt=0;pt<num_ob_gz_pts[which_gz];pt++)

        // Then apply INNER (parity) boundary conditions:
        for(int pt=0;pt<bcstruct->num_ib_gz_pts[which_gz];pt++) {
          const int i0dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i0;
          const int i1dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i1;
          const int i2dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i2;
          const int i0src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i0;
          const int i1src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i1;
          const int i2src  = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i2;
          const int8_t *prty= bcstruct->inner[which_gz][pt].parity;
          //                printf("%d\n",bcstruct->inner_bc_parity[which_gz][pt].parity[gfs_parity[which_gf]]);
          gfs[IDX4S(which_gf,i0dest,i1dest,i2dest)] =
            bcstruct->inner[which_gz][pt].parity[gfs_parity[which_gf]] * gfs[IDX4S(which_gf, i0src,i1src,i2src)];
        }// END for(int pt=0;pt<num_ib_gz_pts[which_gz];pt++)
      } // END for(int which_gz = 0; which_gz < NGHOSTS; which_gz++)
    } // END for(int which_gf=0;which_gf<NUM_GFS;which_gf++)
    }*/
  else {
    printf("ERROR: Sommerfeld boundary conditions are currently only enabled for Cartesian coordinates.\n");
    exit(1);
  } // END coord != Cartesian

} // END function
base_mortar_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_MORTAR_CRITERIA_H) #define KRATOS_BASE_MORTAR_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "custom_utilities/contact_utilities.h" #include "utilities/mortar_utilities.h" #include "utilities/variable_utils.h" #include "custom_processes/aalm_adapt_penalty_value_process.h" #include "custom_processes/compute_dynamic_factor_process.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" // DEBUG #include "includes/gid_io.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseMortarConvergenceCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Custom convergence criteria for the mortar condition * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class BaseMortarConvergenceCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of BaseMortarConvergenceCriteria KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR ); KRATOS_DEFINE_LOCAL_FLAG( IO_DEBUG ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename 
BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The components containers typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::NodesContainerType NodesArrayType; typedef GidIO<> GidIOBaseType; ///@} ///@name Life Cycle ///@{ /// Default constructors explicit BaseMortarConvergenceCriteria( const bool ComputeDynamicFactor = false, const bool IODebug = false, const bool PureSlip = false ) : ConvergenceCriteria< TSparseSpace, TDenseSpace >(), mpIO(nullptr) { // Set local flags mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor); mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug); mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip); if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly); } } ///Copy constructor BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther ) :BaseType(rOther), mOptions(rOther.mOptions), mpIO(rOther.mpIO) { } /// Destructor ~BaseMortarConvergenceCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Criterias that need to be called before getting the solution * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PreCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // The contact model part ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); // We update the normals if necessary const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION; if (normal_variation != NO_DERIVATIVES_COMPUTATION) { ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions } // Update tangent (must be updated even for constant normal) const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? 
r_process_info.GetValue(ADAPT_PENALTY) : false; const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY); /* Compute weighthed gap */ if (adapt_penalty || dynamic_case) { // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); } // In dynamic case if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) { ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part ); compute_dynamic_factor_process.Execute(); } // We recalculate the penalty parameter if ( adapt_penalty ) { AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part ); aalm_adaptation_of_penalty.Execute(); } return true; } /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // We save the current WEIGHTED_GAP in the buffer NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); const auto it_node_begin = r_nodes_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); } // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution 
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); // GiD IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER]; const double label = static_cast<double>(nl_iter); if (nl_iter == 1) { mpIO->InitializeMesh(label); mpIO->WriteMesh(rModelPart.GetMesh()); mpIO->FinalizeMesh(); mpIO->InitializeResults(label, rModelPart.GetMesh()); } mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label); mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label); mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label); mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0); if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) { mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0); } if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0); else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0); if (frictional_problem) { mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label); mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0); 
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label); } } return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart The model part of interest */ void Initialize(ModelPart& rModelPart) override { // Calling base criteria BaseType::Initialize(rModelPart); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Update normal of the conditions ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part); const bool frictional_problem = rModelPart.IsDefined(SLIP) ? 
rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->CloseResultFile(); std::ostringstream new_name ; new_name << "POST_LINEAR_ITER_STEP=""POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP]; mpIO->ChangeOutputName(new_name.str()); } } /** * @brief This function finalizes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->FinalizeResults(); } } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Flags mOptions; /// Local flags ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method resets the weighted gap in the nodes of the problem * @param rModelPart Reference to the ModelPart containing the contact problem. 
*/ virtual void ResetWeightedGap(ModelPart& rModelPart) { NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, r_nodes_array); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief It computes the mean of the normal in the condition in all the nodes * @param rModelPart The model part to compute */ inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart) { // Compute normal and tangent ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part); // Iterate over the computing conditions ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact"); ConditionsArrayType& r_conditions_array = r_computing_contact_model_part.Conditions(); const auto it_cond_begin = r_conditions_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) { auto it_cond = it_cond_begin + i; // Aux coordinates Point::CoordinatesArrayType aux_coords; // We update the paired normal GeometryType& r_parent_geometry = it_cond->GetGeometry().GetGeometryPart(0); aux_coords = r_parent_geometry.PointLocalCoordinates(aux_coords, r_parent_geometry.Center()); it_cond->SetValue(NORMAL, r_parent_geometry.UnitNormal(aux_coords)); } } ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Class BaseMortarConvergenceCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags 
BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2)); } // namespace Kratos #endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
tinybert_test.h
// Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved. // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef _H_TINYBERT_TEST #define _H_TINYBERT_TEST #ifdef _USE_OPENMP #include <omp.h> #endif #include "inference.hpp" #include "data_loader.hpp" #include "profiling.h" #include "parse_command.h" static std::string tinybertTestKernel(U32 sequenceIndex, std::vector<Tensor> sequence, std::shared_ptr<CNN> pipeline, std::vector<std::vector<Tensor>> intents, std::vector<std::vector<Tensor>> slots, int *falseIntent, int *falseSlot, const char **inputNames, const char **outputNames) { std::map<std::string, TensorDesc> inputDescMap; inputDescMap[inputNames[0]] = sequence[0].get_desc(); inputDescMap[inputNames[1]] = sequence[1].get_desc(); inputDescMap[inputNames[2]] = sequence[2].get_desc(); pipeline->reready(inputDescMap); std::map<std::string, std::shared_ptr<U8>> inputs; inputs[inputNames[0]] = ((CpuMemory *)sequence[0].get_memory())->get_shared_ptr(); inputs[inputNames[1]] = ((CpuMemory *)sequence[1].get_memory())->get_shared_ptr(); inputs[inputNames[2]] = ((CpuMemory *)sequence[2].get_memory())->get_shared_ptr(); pipeline->set_input_tensors_value(inputs); pipeline->run(); Tensor intentSoftmax = pipeline->get_tensor_by_name(outputNames[0]); U32 intentNum = intentSoftmax.length(); U32 intentMaxIndex = 0; for (U32 index = 1; index < intentNum; index++) { if (intentSoftmax.element(index) > intentSoftmax.element(intentMaxIndex)) { intentMaxIndex = index; } } std::string log = std::string(" intent: ") + std::to_string(intentMaxIndex) + std::string(" ") + std::to_string(intentSoftmax.element(intentMaxIndex)); if (intents.size() > 0) { F32 *intentResult = (F32 *)((CpuMemory *)(intents[sequenceIndex][0].get_memory()))->get_ptr(); if (intentMaxIndex != intentResult[0] || abs(intentSoftmax.element(intentMaxIndex) - intentResult[1]) > 0.1) { (*falseIntent)++; } } Tensor slotSoftmax = pipeline->get_tensor_by_name(outputNames[1]); auto slotDesc = slotSoftmax.get_desc(); U32 slotNum = slotDesc.dims[1]; U32 slotRange = slotDesc.dims[0]; if (slotDesc.df == DF_MKT) { slotNum = 
slotDesc.dims[0]; slotRange = slotDesc.dims[1]; } std::vector<U32> slotSoftmaxResult; log += std::string(" slot: "); for (U32 i = 0; i < slotNum; i++) { U32 slotMaxIndex = 0; for (U32 index = 1; index < slotRange; index++) { if (slotSoftmax.element(i * slotRange + index) > slotSoftmax.element(i * slotRange + slotMaxIndex)) { slotMaxIndex = index; } } slotSoftmaxResult.push_back(slotMaxIndex); log += std::to_string(slotMaxIndex) + std::string(" "); } if (slots.size() > sequenceIndex) { U32 *slotResult = (U32 *)((CpuMemory *)(slots[sequenceIndex][0].get_memory()))->get_ptr(); for (U32 i = 0; i < slotSoftmaxResult.size(); i++) { if (slotSoftmaxResult.size() != slots[sequenceIndex][0].get_desc().dims[0] || slotResult[i] != slotSoftmaxResult[i]) { (*falseSlot)++; break; } } } return log; } inline void tinybertTest(int argc, char **argv, const char **inputNames, const char **outputNames, F32 *intentRate, F32 *slotRate) { UNI_TIME_INIT ParseRes parse_res; parseCommandLine(argc, argv, &parse_res, "examples"); char *modelPath = (char *)""; char *sequenceDirectory = (char *)""; char *affinityPolicyName = (char *)""; char *algorithmMapPath = (char *)""; if (!parse_res.model.second) { exit(-1); } if (parse_res.model.second) { modelPath = parse_res.model.first; } if (parse_res.inputPath.second) { sequenceDirectory = parse_res.inputPath.first; } if (parse_res.archInfo.second) { affinityPolicyName = parse_res.archInfo.first; } if (parse_res.algoPath.second) { algorithmMapPath = parse_res.algoPath.first; } std::shared_ptr<CNN> pipelineBase; UNI_PROFILE(pipelineBase = createPipeline(affinityPolicyName, modelPath, algorithmMapPath), std::string("bolt::prepare"), std::string("prepare")); // load sequences std::map<std::string, std::shared_ptr<Tensor>> inMap = pipelineBase->get_inputs(); std::vector<TensorDesc> sequenceDescs; TensorDesc wordInputDesc = (*(inMap[inputNames[0]])).get_desc(); wordInputDesc.dt = DT_U32; sequenceDescs.push_back(wordInputDesc); TensorDesc positionInputDesc 
= (*(inMap[inputNames[1]])).get_desc(); positionInputDesc.dt = DT_U32; sequenceDescs.push_back(positionInputDesc); TensorDesc tokenTypeInputDesc = (*(inMap[inputNames[2]])).get_desc(); tokenTypeInputDesc.dt = DT_U32; sequenceDescs.push_back(tokenTypeInputDesc); std::vector<std::vector<Tensor>> sequences, intents, slots; std::vector<std::string> sequencePaths = load_data(sequenceDirectory + std::string("/input"), sequenceDescs, &sequences); // load result std::vector<TensorDesc> intentDescs; TensorDesc intentDesc = tensor1d(DT_F32, 2); intentDescs.push_back(intentDesc); std::vector<std::string> intentPaths = load_data(sequenceDirectory + std::string("/intent"), intentDescs, &intents); std::vector<TensorDesc> slotDescs; slotDescs.push_back(wordInputDesc); std::vector<std::string> slotPaths = load_data(sequenceDirectory + std::string("/slot"), slotDescs, &slots); int falseIntent = 0; int falseSlot = 0; double timeBegin = ut_time_ms(); #ifdef _USE_OPENMP #pragma omp parallel num_threads(OMP_NUM_THREADS) { std::shared_ptr<CNN> pipeline = std::shared_ptr<CNN>(new CNN()); int threadId = omp_get_thread_num(); UNI_PROFILE(*pipeline = pipelineBase->clone(), std::string("bolt::clone-") + std::to_string(threadId), std::string("clone")); pipeline->set_runtime_device(threadId, threadId); #pragma omp for for (U32 sequenceIndex = 0; sequenceIndex < sequences.size(); sequenceIndex++) { std::string log = sequencePaths[sequenceIndex] + ":" + tinybertTestKernel(sequenceIndex, sequences[sequenceIndex], pipeline, intents, slots, &falseIntent, &falseSlot, inputNames, outputNames); UNI_INFO_LOG("%s\n", log.c_str()); } } #else for (U32 sequenceIndex = 0; sequenceIndex < sequences.size(); sequenceIndex++) { std::string log = sequencePaths[sequenceIndex] + ":" + tinybertTestKernel(sequenceIndex, sequences[sequenceIndex], pipelineBase, intents, slots, &falseIntent, &falseSlot, inputNames, outputNames); UNI_INFO_LOG("%s\n", log.c_str()); } #endif double timeEnd = ut_time_ms(); double totalTime 
= (timeEnd - timeBegin); UNI_TIME_STATISTICS U32 validSequence = UNI_MAX(1, sequences.size()); *intentRate = 100.0 * (validSequence - falseIntent) / validSequence; *slotRate = 100.0 * (validSequence - falseSlot) / validSequence; UNI_CI_LOG("intent correct rate: %f %%\n", *intentRate); UNI_CI_LOG("slot correct rate: %f %%\n", *slotRate); UNI_CI_LOG("avg_time:%fms/sequence\n", 1.0 * totalTime / validSequence); } #endif // _H_TINYBERT_TEST
GB_binop__ldexp_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp32) // C=scalar+B GB (_bind1st__ldexp_fp32) // C=scalar+B' GB (_bind1st_tran__ldexp_fp32) // C=A+scalar GB (_bind2nd__ldexp_fp32) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = ldexpf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexpf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = 
    // (tail of the A*D colscale kernel whose head lies before this chunk:
    // cast of C->x, template include, and return)
    (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Disabled (#if 0): the rowscale kernel is not generated for this operator.
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx: values array of C; the actual work is done by the included template.
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Set-union element-wise add for z = ldexpf(x,y) on float.  All traversal
// logic lives in GB_add_template.c; this wrapper only declares the workspace
// and (for eWiseUnion) unpacks the alpha/beta scalars used where A or B has
// no entry.
GrB_Info GB (_AaddB__ldexp_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: scalars stand in for missing entries of A and B
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ldexp_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ldexp_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ldexp_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ldexp_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = ldexpf (x, Bx [p]) for every entry present in B (per bitmap Bb).
GrB_Info GB (_bind1st__ldexp_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = ldexpf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = ldexpf (Ax [p], y) for every entry present in A (per bitmap Ab).
GrB_Info GB (_bind2nd__ldexp_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = ldexpf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    float aij = GBX (Ax, pA, false) ;               \
    Cx [pC] = ldexpf (x, aij) ;                     \
}

GrB_Info GB (_bind1st_tran__ldexp_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (a no-op here, since both x and A are float)
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    float aij = GBX (Ax, pA, false) ;               \
    Cx [pC] = ldexpf (aij, y) ;                     \
}

GrB_Info GB (_bind2nd_tran__ldexp_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ===== File: residualbased_elimination_builder_and_solver_componentwise.h =====
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "includes/global_pointer_variables.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. This is a specialization of the standard buliding strategy to the case in which a single variable is to be used in the building. 
the creation of the DofList and the construction of the system matrix is in this case much faster as the neighborhood relationships are considered to be known \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace , class TLinearSolver, class TVariableType > class ResidualBasedEliminationBuilderAndSolverComponentwise : public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationBuilderAndSolverComponentwise ); typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> ResidualBasedEliminationBuilderAndSolverType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename 
BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolverComponentwise( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "name" : "ResidualBasedEliminationBuilderAndSolverComponentwise", "components_wise_variable" : "SCALAR_VARIABLE_OR_COMPONENT" })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); rVar = KratosComponents<TVariableType>::Get(ThisParameters["components_wise_variable"].GetString()); } /** * @brief Default constructor. Constructor. */ explicit ResidualBasedEliminationBuilderAndSolverComponentwise( typename TLinearSolver::Pointer pNewLinearSystemSolver,TVariableType const& Var) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver) , rVar(Var) { /* std::cout << "using the standard builder and solver " << std::endl; */ } /** Destructor. 
*/ ~ResidualBasedEliminationBuilderAndSolverComponentwise() override {} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) override { KRATOS_TRY if(!pScheme) KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //create a partition of the element array int number_of_threads = OpenMPUtils::GetNumThreads(); #ifdef _OPENMP int A_size = A.size1(); //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(A.size1()); for(int i = 0; i<A_size; i++) omp_init_lock(&lock_array[i]); #endif DenseVector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); if (this->GetEchoLevel()>0) { KRATOS_WATCH( number_of_threads ); KRATOS_WATCH( element_partition ); } double start_prod = OpenMPUtils::GetCurrentTime(); #pragma omp parallel for firstprivate(number_of_threads) schedule(static,1) for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k]; typename ElementsArrayType::ptr_iterator 
it_end=pElements.ptr_begin()+element_partition[k+1]; unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution (*it)->InitializeNonLinearIteration(CurrentProcessInfo); (*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo); Geometry< Node<3> >& geom = (*it)->GetGeometry(); if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false); for(unsigned int i=0; i<geom.size(); i++) EquationId[i] = geom[i].GetDof(rVar,pos).EquationId(); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array); #else this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId); #endif } } DenseVector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); #pragma omp parallel for firstprivate(number_of_threads) schedule(static,1) for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1]; unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar); // A all elements for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution (*it)->InitializeNonLinearIteration(CurrentProcessInfo); (*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo); Geometry< Node<3> >& geom = (*it)->GetGeometry(); if(EquationId.size() 
!= geom.size()) EquationId.resize(geom.size(),false); for(unsigned int i=0; i<geom.size(); i++) { EquationId[i] = geom[i].GetDof(rVar,pos).EquationId(); } #ifdef USE_LOCKS_IN_ASSEMBLY this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array); #else this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId); #endif } } if (this->GetEchoLevel()>0) { double stop_prod = OpenMPUtils::GetCurrentTime(); std::cout << "parallel building time: " << stop_prod - start_prod << std::endl; } #ifdef _OPENMP for(int i = 0; i<A_size; i++) omp_destroy_lock(&lock_array[i]); #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) override { KRATOS_TRY //fills a list of "active" nodes defined as nodes which have neighbours // AND no fixed pressure mActiveNodes.clear(); mActiveNodes.reserve(r_model_part.Nodes().size() ); for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { mActiveNodes.push_back(*(it.base() )); } } //fills the DofList and give a unique progressive tag to each node BaseType::mDofSet.clear(); BaseType::mDofSet.reserve(mActiveNodes.size() ); for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++) { BaseType::mDofSet.push_back( iii->pGetDof(rVar).get() ); } //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size()==0) KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = 
BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) ); pA.swap(pNewA); } if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) ); pDx.swap(pNewDx); } if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) ); pb.swap(pNewb); } if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false); #ifdef _OPENMP ParallelConstructGraph(A); #else ConstructGraph(A); #endif } else { if(A.size1() != 
BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { //KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl; A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true); #ifdef _OPENMP ParallelConstructGraph(A); #else ConstructGraph(A); #endif } } if(Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize,false); if(b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize,false); // //if needed resize the vector for the calculation of reactions if(BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if(BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize,false); } //swapping pointers // pA.swap(pNewA); // pDx.swap(pNewDx); // pb.swap(pNewb); #ifndef __SUNPRO_CC KRATOS_CATCH("") #endif } //************************************************************************** //************************************************************************** void Clear() override { this->mDofSet = DofsArrayType(); if(this->mpReactionsVector != NULL) { TSparseSpace::Clear( (this->mpReactionsVector) ); } // *(this->mpReactionsVector) = TSystemVectorType(); if (this->GetEchoLevel()>1) { KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverComponentwise"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ //************************************************************************** //************************************************************************** //************************************************************************** //************************************************************************** void ConstructGraph(TSystemMatrixType& A) { KRATOS_TRY std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize); int total_size = 0; unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar); //constructing the system matrix row by row int index_i; for(GlobalPointersVector< Node<3> >::iterator in = mActiveNodes.begin(); in!=mActiveNodes.end(); in++) { const Node<3>::DofType& current_dof = in->GetDof(rVar,pos); if( current_dof.IsFixed() == false) { index_i = (current_dof).EquationId(); GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES); std::vector<int>& indices = index_list[index_i]; indices.reserve(neighb_nodes.size()+1); //filling the first neighbours list indices.push_back(index_i); for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos); if(neighb_dof.IsFixed() == false ) { int index_j = (neighb_dof).EquationId(); indices.push_back(index_j); } } //sorting the indices and elminating the duplicates std::sort(indices.begin(),indices.end()); typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end()); indices.erase(new_end,indices.end()); total_size += indices.size(); } } A.reserve(total_size,false); //setting to zero the matrix (and the diagonal matrix) for(unsigned int i=0; 
i<BaseType::mEquationSystemSize; i++) { std::vector<int>& indices = index_list[i]; for(unsigned int j=0; j<indices.size(); j++) { A.push_back(i,indices[j] , 0.00); } } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** //************************************************************************** //************************************************************************** #ifdef _OPENMP void ParallelConstructGraph(TSystemMatrixType& A) { #ifndef __SUNPRO_CC KRATOS_TRY #endif std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize); int number_of_threads = omp_get_max_threads(); unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar); //constructing the system matrix row by row DenseVector<unsigned int> partition; DenseVector<unsigned int> local_sizes(number_of_threads); for(int i=0; i<number_of_threads; i++) local_sizes[i] = 0; CreatePartition(number_of_threads, mActiveNodes.size(), partition); #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1) for(int k=0; k<number_of_threads; k++) { GlobalPointersVector< Node<3> >::iterator it_begin = mActiveNodes.begin()+partition[k]; GlobalPointersVector< Node<3> >::iterator it_end = mActiveNodes.begin()+partition[k+1]; for(GlobalPointersVector< Node<3> >::iterator in = it_begin; in!=it_end; in++) { const Node<3>::DofType& current_dof = in->GetDof(rVar,pos); if( current_dof.IsFixed() == false) { int index_i = (current_dof).EquationId(); GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES); std::vector<int>& indices = index_list[index_i]; indices.reserve(neighb_nodes.size()+1); //filling the first neighbours list indices.push_back(index_i); for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos); if(neighb_dof.IsFixed() == false 
) { int index_j = (neighb_dof).EquationId(); indices.push_back(index_j); } } //sorting the indices and elminating the duplicates std::sort(indices.begin(),indices.end()); typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end()); indices.erase(new_end,indices.end()); local_sizes[k] += indices.size(); } } } //calculate the total size of the system int total_size = 0.0; for(int i=0; i<number_of_threads; i++) total_size += local_sizes[i]; A.reserve(total_size,false); //setting to zero the matrix (and the diagonal matrix) for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++) { std::vector<int>& indices = index_list[i]; for(unsigned int j=0; j<indices.size(); j++) { A.push_back(i,indices[j] , 0.00); } } #ifndef __SUNPRO_CC KRATOS_CATCH("") #endif } #endif /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ TVariableType const & rVar; GlobalPointersVector<Node<3> > mActiveNodes; /*@} */ /**@name Private Operators*/ /*@{ */ //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(unsigned int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class 
ResidualBasedEliminationBuilderAndSolverComponentwise */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
// ===== File: convolution_sgemm_int8.h =====
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Round a float to the nearest integer and saturate into [-127, 127].
// NOTE: the lower bound is -127 rather than -128 — presumably the symmetric
// range used by this int8 quantization scheme; confirm against the callers.
static inline signed char float2int8(float v)
{
    int int32 = static_cast<int>(round(v));
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

// int8 convolution via im2row + a 4x4-tiled integer GEMM:
//   1) unroll the input into a row-major "im2row" matrix (one row per output pixel),
//   2) repack rows of that matrix and of the kernel into 4-wide interleaved panels,
//   3) multiply the panels, accumulating in 32-bit ints, into top_blob.
// NOTE(review): the definition is cut off mid-GEMM at the end of this chunk.
static void conv_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                       const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;

    // im2row: one row of length kernel_h*kernel_w*inch per output position
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            // source pixel for output (i,j), channel p, tap (u,v)
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // GEMM dimensions:
    // int M = outch;                    // outch
    int N = outw * outh;                 // outsize or out stride
    int K = kernel_w * kernel_h * inch;  // ksize * inch

    // bottom_im2row memory packed 4 x 4: groups of 4 output positions are
    // interleaved 2 elements at a time along K; the tail positions get one
    // channel each.
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            // interleave pairs of K elements from 4 rows: 8 bytes per step
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            // odd-K tail: one element from each of the 4 rows
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // remaining output positions (out_size not divisible by 4): packed singly
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4: same interleaving as above, applied to
    // groups of 4 output channels of the weight matrix
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        // leftover output channels, packed singly
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 tiled integer GEMM over the packed panels
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                     // outch
        // int N = outw * outh;               // outsize or out stride
        // int L = kernel_w * kernel_h * inch;// ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            // 4 output channels computed together
            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i + 1);
            int* output2 = top_blob.channel(i + 2);
            int* output3 = top_blob.channel(i + 3);

            int j = 0;
            // 4 output positions per iteration; sumX[n] accumulates channel X,
            // position j+n, in 32-bit int
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                // two K steps per iteration, matching the 2-wide packing
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n];     // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n];     // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n];     // k2
                        sum2[n] += (int)va[5] * vb[2 * 
n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { int* output = top_blob.channel(i); int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n]; } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int 
k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_dequant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 
4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i + 1); const signed char* img2 = bottom_im2row.row<signed char>(i + 2); const signed char* img3 = bottom_im2row.row<signed char>(i + 3); signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p + 0) * inch * kernel_size; const signed char* k1 = kernel + (p + 1) * inch * kernel_size; const signed char* k2 = kernel + (p + 2) * inch * kernel_size; const signed char* k3 = kernel + (p + 3) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; 
ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i + 1] : 0.f; const float bias2 = bias ? bias[i + 2] : 0.f; const float bias3 = bias ? 
bias[i + 3] : 0.f; const float scale_dequant0 = scale_dequant[i]; const float scale_dequant1 = scale_dequant[i + 1]; const float scale_dequant2 = scale_dequant[i + 2]; const float scale_dequant3 = scale_dequant[i + 3]; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i + 1); float* output2 = top_blob.channel(i + 2); float* output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; // k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; // k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; // k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = (float)sum0[n] * scale_dequant0 + bias0; output1[n] = (float)sum1[n] * scale_dequant1 + bias1; output2[n] = (float)sum2[n] * scale_dequant2 + bias2; output3[n] = (float)sum3[n] * scale_dequant3 + bias3; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += 
(int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = (float)sum0 * scale_dequant0 + bias0; output1[0] = (float)sum1 * scale_dequant1 + bias1; output2[0] = (float)sum2 * scale_dequant2 + bias2; output3[0] = (float)sum3 * scale_dequant3 + bias3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0.f; const float scale_dequant0 = scale_dequant[i]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = (float)sum[n] * scale_dequant0 + bias0; } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = (float)sum * scale_dequant0 + bias0; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const int kernel_w, const int kernel_h, const int 
stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_requant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i + 1); const signed char* img2 = bottom_im2row.row<signed char>(i + 2); const signed char* img3 = bottom_im2row.row<signed char>(i + 3); signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = 
img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p + 0) * inch * kernel_size; const signed char* k1 = kernel + (p + 1) * inch * kernel_size; const signed char* k2 = kernel + (p + 2) * inch * kernel_size; const signed char* k3 = kernel + (p + 3) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // 
sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; signed char* output0 = top_blob.channel(i); signed char* output1 = top_blob.channel(i + 1); signed char* output2 = top_blob.channel(i + 2); signed char* output3 = top_blob.channel(i + 3); const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i + 1] : 0.f; const float bias2 = bias ? bias[i + 2] : 0.f; const float bias3 = bias ? bias[i + 3] : 0.f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; const float scale_requant_in1 = scale_requant[2 * (i + 1)]; const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1]; const float scale_requant_in2 = scale_requant[2 * (i + 2)]; const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1]; const float scale_requant_in3 = scale_requant[2 * (i + 3)]; const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; // k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; // k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; // k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; 
sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0); output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1); output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2); output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3); } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { signed char* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0); } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0); output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } }
GB_binop__eq_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__eq_uint8 // A.*B function (eWiseMult): GB_AemultB__eq_uint8 // A*D function (colscale): GB_AxD__eq_uint8 // D*A function (rowscale): GB_DxB__eq_uint8 // C+=B function (dense accum): GB_Cdense_accumB__eq_uint8 // C+=b function (dense accum): GB_Cdense_accumb__eq_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_uint8 // C=scalar+B GB_bind1st__eq_uint8 // C=scalar+B' GB_bind1st_tran__eq_uint8 // C=A+scalar GB_bind2nd__eq_uint8 // C=A'+scalar GB_bind2nd_tran__eq_uint8 // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax 
[pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__eq_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__eq_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__eq_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__eq_uint8 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__eq_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__eq_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__eq_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, 
const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__eq_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__eq_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; 
\ } GrB_Info GB_bind1st_tran__eq_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__eq_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
5446.c
/*
 * 27-point stencil benchmark: runs the stencil on the host as a reference,
 * then again inside an OpenACC data region / OpenMP parallel loop, times the
 * second run, and verifies it against the host result.
 *
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10          /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)              /* 1 / number of neighbours in the 27-pt stencil (centre excluded). */
#define TOLERANCE 1.0e-15

extern int reps;              /* Repetitions. */
extern double *times;         /* Array to store results in. */
extern int flag;              /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */

unsigned int datasize = -1; /* Datasize for tests in bytes ((unsigned)-1 == "not set"). */
int reps = -1;              /* Repetitions (-1 == "not set"). */
double *times;              /* Array of doubles storing the benchmark times in microseconds. */
double testtime;            /* The average test time in microseconds for reps runs. */
double testsd;              /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0;               /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward declarations of utility functions. */
double max_diff(double *, double *, int);
void wul();

/* Print the command-line usage for this benchmark. */
void usage(char *argv[])
{
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
 * Unrecognised arguments terminate the program with a usage message.
 */
void parse_args(int argc, char *argv[])
{
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--reps") == 0) {
            reps = atoi(argv[++arg]);
            if (reps == 0) {
                printf("Invalid integer:--reps: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--datasize") == 0) {
            datasize = atoi(argv[++arg]);
            if (datasize == 0) {
                printf("Invalid integer:--datasize: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

/*
 * Compute the mean (*mtp) and standard deviation (*sdp) of the collected
 * times, ignoring entries equal to 0 (these mark failed repetitions).
 */
void stats(double *mtp, double *sdp)
{
    double meantime, totaltime, sumsq, mintime, maxtime, sd;
    int i, good_reps;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;
    good_reps = 0;

    for (i = 0; i < reps; i++) {
        /* Skip entries where times is 0, this indicates an error occured */
        if (times[i] != 0) {
            mintime = (mintime < times[i]) ? mintime : times[i];
            maxtime = (maxtime > times[i]) ? maxtime : times[i];
            totaltime += times[i];
            good_reps++;
        }
    }

    /* FIX: if every repetition failed, the divisions below would be
     * divide-by-zero; report zero mean/sd instead. */
    if (good_reps == 0) {
        *mtp = 0.;
        *sdp = 0.;
        return;
    }

    meantime = totaltime / good_reps;
    sumsq = 0;

    for (i = 0; i < reps; i++) {
        if (times[i] != 0) {
            sumsq += (times[i] - meantime) * (times[i] - meantime);
        }
    }
    sd = sqrt(sumsq / good_reps);

    *mtp = meantime;
    *sdp = sd;
}

/*
 * This function prints the results of the tests.
 * If you use a compiler which sets a different preprocessor flag
 * you may wish to add it here.
 */
void print_results(char *name, double testtime, double testsd)
{
    char compiler[20];

    /* Set default compiler identifier. */
    sprintf(compiler, "COMPILER");

    /* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
    sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
    sprintf(compiler, "CAPS");
#endif

    /* Only the mean time (in microseconds) is printed; the commented-out
     * format also reported compiler, datasize and 95% confidence interval. */
    //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
    printf("%f\n", testtime*1e6);
}

/*
 * This function initialises the storage for the test results and sets the defaults.
 */
void init(int argc, char **argv)
{
    parse_args(argc, argv);

    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    times = (double *)malloc((reps) * sizeof(double));
    /* FIX: the original never checked this allocation before use. */
    if (times == NULL) {
        printf("Unable to allocate memory for results.\n");
        exit(EXIT_FAILURE);
    }
}

/* Release the results storage allocated by init(). */
void finalise(void)
{
    free(times);
}

/*
 * This function runs the benchmark specified, reps times, recording each
 * timing (or 0 on failure) and printing the aggregated statistics.
 */
void benchmark(char *name, double (*test)(void))
{
    int i = 0;
    double tmp = 0;

    for (i = 0; i < reps; i++) {
        tmp = test();
        if (tmp == -10000) {
            /* Sentinel: memory allocation failure inside the test. */
            printf("Memory allocation failure in %s\n", name);
            times[i] = 0;
        } else if (tmp == -11000) {
            /* Sentinel: CPU and accelerated results did not match. */
            printf("CPU/GPU mismatch in %s\n", name);
            times[i] = 0;
        } else {
            times[i] = tmp;
        }
    }

    stats(&testtime, &testsd);
    print_results(name, testtime, testsd);
}

/*
 * 27-point stencil test.  Returns the accelerated-region runtime in seconds,
 * -10000 on allocation failure, or -11000 if the accelerated result does not
 * match the host reference within TOLERANCE.
 */
double stencil()
{
    extern unsigned int datasize;
    int sz = cbrt((datasize/sizeof(double))/2); /* cube edge incl. halos */
    int i, j, k, iter;
    int n = sz - 2;                             /* interior size */
    double fac = FAC;
    double t1, t2;
    double md;

    /* Work buffers, with halos */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

    if (a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL) {
        /* Something went wrong in the memory allocation here, fail gracefully.
         * FIX: free whatever was allocated (free(NULL) is a no-op) -- the
         * original leaked the successful partial allocations. */
        free(a0);
        free(device_result);
        free(a1);
        free(host_result);
        free(a0_init);
        return (-10000);
    }

    /* initialize input array a0 */

    /* zero all of array (including halos) */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
            }
        }
    }

    /* use random numbers to fill interior */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double)rand() / (double)(1.0 + RAND_MAX);
            }
        }
    }

    /* save initial input array for later accelerated run */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* run main computation on host: each interior point becomes the mean of
     * its 26 neighbours (centre excluded, hence fac = 1/26) */
    for (iter = 0; iter < ITERATIONS; iter++) {

        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                }
            }
        }

        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }

    } /* end iteration loop */

    /* save result */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* copy initial array back to a0 */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }

    t1 = omp_get_wtime();

#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {

            /* FIX: the original attached a separate `parallel for` pragma to
             * each of the three loops AND used collapse(2) on loops that were
             * therefore not perfectly nested -- non-conforming OpenMP plus
             * heavy nested oversubscription.  One combined pragma over the
             * perfectly nested i/j loops (k privatised) is the conforming
             * equivalent. */
#pragma omp parallel for schedule(dynamic, 16) collapse(2) private(k) num_threads(28)
            for (i = 1; i < n+1; i++) {
                for (j = 1; j < n+1; j++) {
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                            ) * fac;
                    }
                }
            }

#pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
#pragma acc loop
                for (j = 1; j < n+1; j++) {
#pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }

        } /* end iteration loop */
    } /* end data region */
#pragma acc wait

    t2 = omp_get_wtime();

    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

    md = max_diff(&host_result[0], &device_result[0], sz);

    /* Free malloc'd memory to prevent leaks */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);

    if (md < TOLERANCE) {
        return (t2 - t1);
    } else {
        return (-11000);
    }
}

/* Utility Functions */

/* Return the maximum absolute elementwise difference over the interior
 * (halos excluded) of two sz*sz*sz arrays. */
double max_diff(double *array1, double *array2, int sz)
{
    double tmpdiff, diff;
    int i, j, k;
    int n = sz - 2;

    diff = 0.0;
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
                if (tmpdiff > diff) diff = tmpdiff;
            }
        }
    }
    return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul()
{
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;

    if (arr_a == NULL || arr_b == NULL) {
        printf("Unable to allocate memory in wul.\n");
        /* FIX: the original fell through and dereferenced the NULL buffer. */
        free(arr_a);
        free(arr_b);
        return;
    }

    for (i = 0; i < data; i++) {
        arr_a[i] = (double)(rand() / (1.0 + RAND_MAX));
    }

#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
#pragma acc parallel loop
        for (i = 0; i < data; i++) {
            arr_b[i] = arr_a[i] * 2;
        }
    }

    if (arr_a[0] < 0) {
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }

    free(arr_a);
    free(arr_b);
}

int main(int argc, char **argv)
{
    char testName[32];

    /* Initialise storage for test results & parse input arguements. */
    init(argc, argv);

    /* Ensure device is awake. */
    wul();

    sprintf(testName, "27S");
    benchmark(testName, &stencil);

    /* Print results & free results storage */
    finalise();

    return EXIT_SUCCESS;
}
TemporalRowConvolution.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/TemporalRowConvolution.c"
#else

// Validate the tensor arguments shared by the forward and backward passes.
// `gradOutput` may be NULL (forward pass); `bias` may be NULL (no-bias conv).
static inline void THNN_(TemporalRowConvolution_shapeCheck)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *bias,
	int kW,
	int dW,
	int padW) {

  THArgCheck(kW > 0, 5,
             "kernel size should be greater than zero, but got kW: %d", kW);
  THArgCheck(dW > 0, 6,
             "stride should be greater than zero, but got dW: %d", dW);
  THNN_ARGCHECK(!weight->is_empty() && weight->dim() == 3, 3, weight,
                "non-empty 3D weight tensor expected, but got: %s");
  THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
  THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");

  if (bias != NULL) {
    THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
  }

  // we're always looking at (possibly batch) x feats x seq
  int ndim = input->dim();
  int dimF = 0;  // feature dimension
  int dimS = 1;  // sequence dimension

  if (ndim == 3) {
    // batch mode: shift both dimensions past the leading batch axis
    ++dimS;
    ++dimF;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 2 || ndim == 3), 1, input,
                "non-empty 2D or 3D (batch mode) input tensor expected, but got :%s");

  int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
  int64_t nInputFrame = input->size(dimS);
  // standard 1-D convolution output-length formula
  int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

  if (nOutputFrame < 1) {
    THError("Given input size: (%d x %d). "
            "Calculated output size: (%d x %d). Output size is too small",
            inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame);
  }
}

// Inverse of unfolded_copy_row: scatter-add each unfolded column of `finput`
// back into the corresponding window of `input` (used for gradInput).
static void THNN_(unfolded_acc_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

  int64_t c;
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

// #pragma omp parallel for private(c)
  for (c = 0; c < inputFrameSize; c++) {
    int64_t kw, x;
    int64_t ix = 0;

    for (kw = 0; kw < kW; kw++) {
      scalar_t *src = finput_data
                      + c * (kW * nOutputFrame)
                      + kw * (nOutputFrame);
      scalar_t *dst = input_data + c * (nInputFrame);

      ix = (size_t)(kw);
      if (dW == 1) {
        // contiguous destination: vectorised accumulate over the whole row
        scalar_t *dst_slice = dst + (size_t)(ix);
        THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame);
      } else {
        // strided destination: accumulate one element at a time
        for (x = 0; x < nOutputFrame; x++) {
          scalar_t *dst_slice = dst + (size_t)(ix + x * dW);
          THVector_(cadd)(dst_slice, dst_slice, src + (size_t)(x), 1, 1);
        }
      }
    }
  }
}

// Unfold `input` into `finput` so each row-convolution becomes a matrix
// multiply: finput holds, per channel, kW shifted copies of the sequence.
static void THNN_(unfolded_copy_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

  int64_t k;
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

// #pragma omp parallel for private(k)
  for (k = 0; k < inputFrameSize * kW; k++) {
    int64_t c = k / kW;       // channel index
    int64_t rest = k % kW;
    int64_t kw = rest % kW;   // kernel offset within the window
    int64_t x;
    int64_t ix;
    scalar_t *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame);
    scalar_t *src = input_data + c * (nInputFrame);

    ix = (size_t)(kw);
    if (dW == 1) {
      // unit stride: one bulk copy per (channel, offset) pair
      memcpy(dst, src + (size_t)(ix), sizeof(scalar_t) * (nOutputFrame));
    } else {
      for (x = 0; x < nOutputFrame; x++) {
        memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW),
               sizeof(scalar_t) * 1);
      }
    }
  }
}

// Forward pass for a single (non-batch) frame: unfold, add bias, then a
// batched matrix multiply weight @ finput accumulated into a 3-D view of
// output.
static void THNN_(TemporalRowConvolution_updateOutput_frame)(
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

  int64_t i;

  // view of output as inputFrameSize x 1 x nOutputFrame (shares storage)
  THTensor *output3d = THTensor_(newWithStorage3d)(
    THTensor_getStoragePtr(output), output->storage_offset(),
    inputFrameSize, -1,
    1, -1,
    nOutputFrame, -1);

  THNN_(unfolded_copy_row)(finput, input, kW, dW, padW,
                           inputFrameSize, nInputFrame, nOutputFrame);

  THTensor_(zero)(output);

  if (bias != NULL) {
    // broadcast each per-channel bias across the output row
    for (i = 0; i < inputFrameSize; i++)
      THVector_(fill)
        (THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset()
         + output->stride(0) * i,
         THTensor_(get1d)(bias, i), nOutputFrame);
  }

  THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput);

  c10::raw::intrusive_ptr::decref(output3d);
}

// Public forward entry point.  When featFirst is false the input arrives as
// seq x feats and is transposed to feats x seq first; the output is
// transposed back before returning.
void THNN_(TemporalRowConvolution_updateOutput)(
	THNNState *state,
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	THTensor *fgradInput, // unused here but needed for Cuda
	int kW,
	int dW,
	int padW,
	bool featFirst) {

  int ndim = input->dim();

  THTensor *tinput = NULL;
  if (!featFirst) {
    tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
    input = THTensor_(newContiguous)(tinput);
  } else {
    input = THTensor_(newContiguous)(input);
  }

  THNN_(TemporalRowConvolution_shapeCheck)(
    state, input, NULL, weight, bias, kW, dW, padW);

  int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
  int64_t nInputFrame = input->size(ndim - 1);
  int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

  if (ndim == 2) { /* non-batch mode */

    THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame);
    THTensor_(resize2d)(output, inputFrameSize, nOutputFrame);

    THTensor_(zero)(finput);
    THTensor_(zero)(output);

    THNN_(TemporalRowConvolution_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, dW, padW,
       inputFrameSize, nInputFrame, nOutputFrame);

  } else {
    int64_t T = input->size(0);  // batch size
    int64_t t;

    THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame);
    THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame);

    THTensor_(zero)(finput);
    THTensor_(zero)(output);

#pragma omp parallel for private(t)
    for (t = 0; t < T; t++) {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(TemporalRowConvolution_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, dW, padW,
         inputFrameSize, nInputFrame, nOutputFrame);

      c10::raw::intrusive_ptr::decref(input_t);
      c10::raw::intrusive_ptr::decref(output_t);
      c10::raw::intrusive_ptr::decref(finput_t);
    }
  }

  if (!featFirst) { // NOTE: output will NOT be contiguous in this case
    THTensor_(transpose)(output, output, ndim - 1, ndim - 2);
    c10::raw::intrusive_ptr::decref(tinput);
  }

  c10::raw::intrusive_ptr::decref(input);
}

// Backward (gradInput) pass for a single frame: matrix multiply the
// (transposed) weight with a 3-D view of gradOutput, then fold the unfolded
// gradient back into gradInput.
static void THNN_(TemporalRowConvolution_updateGradInput_frame)(
	THTensor *gradInput,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

  THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    inputFrameSize, -1,
    1, -1,
    nOutputFrame, -1);

  // weight:		inputFrameSize x kW x 1
  // gradOutput3d:	inputFrameSize x 1 x nOutputFrame
  THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d);
  // fgradInput:	inputFrameSize x kW x nOutputFrame
  c10::raw::intrusive_ptr::decref(gradOutput3d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc_row)(fgradInput, gradInput,
                          kW, dW, padW,
                          inputFrameSize, nInputFrame, nOutputFrame);
}

// Public gradInput entry point; mirrors updateOutput's featFirst/transpose
// handling and batch dispatch.
void THNN_(TemporalRowConvolution_updateGradInput)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradInput,
	THTensor *weight,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst) {

  int ndim = input->dim();

  THTensor *tinput, *tgradOutput;

  if (!featFirst) {
    tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
    tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

    input = THTensor_(newContiguous)(tinput);
    gradOutput = THTensor_(newContiguous)(tgradOutput);

  } else {
    input = THTensor_(newContiguous)(input);
    gradOutput = THTensor_(newContiguous)(gradOutput);
  }

  THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight,
                                           NULL, kW, dW, padW);

  int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
  int64_t nInputFrame = input->size(ndim - 1);
  int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

  THTensor_(resizeAs)(fgradInput, finput);
  THTensor_(resizeAs)(gradInput, input);

  THTensor_(zero)(fgradInput);
  THTensor_(zero)(gradInput);

  // backward uses weight with its kW and singleton dims swapped
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 1, 2);

  if (ndim == 2) {
    THNN_(TemporalRowConvolution_updateGradInput_frame)
      (gradInput, gradOutput, tweight, fgradInput,
       kW, dW, padW,
       inputFrameSize, nInputFrame, nOutputFrame);
  } else {
    int64_t T = input->size(0);
    int64_t t;

#pragma omp parallel for private(t)
    for (t = 0; t < T; t++) {

      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(TemporalRowConvolution_updateGradInput_frame)
        (gradInput_t, gradOutput_t, tweight, fgradInput_t,
         kW, dW, padW,
         inputFrameSize, nInputFrame, nOutputFrame);

      c10::raw::intrusive_ptr::decref(gradInput_t);
      c10::raw::intrusive_ptr::decref(gradOutput_t);
      c10::raw::intrusive_ptr::decref(fgradInput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(tweight);

  if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case

    c10::raw::intrusive_ptr::decref(tinput);
    c10::raw::intrusive_ptr::decref(tgradOutput);

    THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2);
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
}

// Accumulate weight/bias gradients for a single frame: gradWeight +=
// scale * gradOutput3d @ finput^T; gradBias += scale * row sums of gradOutput.
static void THNN_(TemporalRowConvolution_accGradParameters_frame)(
	THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
	THTensor *finput, scalar_t scale) {

  int64_t i;
  THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    gradOutput->size(0), -1,
    1, -1,
    gradOutput->size(1), -1);

  THTensor *tfinput = THTensor_(new)();
  THTensor_(transpose)(tfinput, finput, 1, 2);
  // gradOutput3d:	inputFrameSize x 1 x nOutputFrame
  // finput:		inputFrameSize x nOutputFrame x kW
  THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput);
  // gradWeight:	inputFrameSize x 1 x kW
  c10::raw::intrusive_ptr::decref(tfinput);

  if (gradBias != NULL) {
    for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++) {
      int64_t k;
      scalar_t sum = 0;
      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput3d))
        + gradOutput3d->storage_offset()
        + i * gradOutput3d->stride(0);
      for (k = 0; k < gradOutput3d->size(2); k++) {
        sum += data[k];
      }
      (THStorage_(data)(THTensor_getStoragePtr(gradBias))
       + gradBias->storage_offset())[i] += scale * sum;
    }
  }

  c10::raw::intrusive_ptr::decref(gradOutput3d);
}

// Public parameter-gradient entry point.  NOTE(review): unlike the other two
// batch loops this one is intentionally serial -- gradWeight/gradBias are
// accumulated across frames.
void THNN_(TemporalRowConvolution_accGradParameters)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradWeight,
	THTensor *gradBias,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst,
	accreal scale_) {

  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  int ndim = input->dim();

  THTensor *tinput = NULL;
  THTensor *tgradOutput = NULL;

  if (!featFirst) {
    tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
    tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

    input = THTensor_(newContiguous)(tinput);
    gradOutput = THTensor_(newContiguous)(tgradOutput);

  } else {
    input = THTensor_(newContiguous)(input);
    gradOutput = THTensor_(newContiguous)(gradOutput);
  }

  THNN_(TemporalRowConvolution_shapeCheck)
    (state, input, gradOutput, gradWeight, gradBias, kW, dW, padW);

  if (ndim == 2) {
    THNN_(TemporalRowConvolution_accGradParameters_frame)(
      gradOutput, gradWeight, gradBias, finput, scale);
  } else {
    int64_t T = input->size(0);
    int64_t t;

    for (t = 0; t < T; t++) {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(TemporalRowConvolution_accGradParameters_frame)(
        gradOutput_t, gradWeight, gradBias, finput_t, scale);

      c10::raw::intrusive_ptr::decref(gradOutput_t);
      c10::raw::intrusive_ptr::decref(finput_t);
    }
  }

  if (!featFirst) {
    c10::raw::intrusive_ptr::decref(tinput);
    c10::raw::intrusive_ptr::decref(tgradOutput);
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
}

#endif
multiple.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int provided, rank, ntasks; int tid, nthreads, msg, i; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); /* Check that the MPI implementation supports MPI_THREAD_MULTIPLE */ if (provided < MPI_THREAD_MULTIPLE) { printf("MPI does not support MPI_THREAD_MULTIPLE\n"); MPI_Abort(MPI_COMM_WORLD, -1); return 0; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &ntasks); #pragma omp parallel private(msg, tid, nthreads, i) { nthreads = omp_get_num_threads(); tid = omp_get_thread_num(); if (rank == 0) { #pragma omp single { printf("%i threads in master rank\n", nthreads); } for (i = 1; i < ntasks; i++) MPI_Send(&tid, 1, MPI_INTEGER, i, tid, MPI_COMM_WORLD); } else { MPI_Recv(&msg, 1, MPI_INTEGER, 0, tid, MPI_COMM_WORLD, MPI_STATUS_IGNORE); printf("Rank %i thread %i received %i\n", rank, tid, msg); } } MPI_Finalize(); return 0; }
test_paje.c
#include <stdio.h> #include <string.h> #include <sys/time.h> #include <unistd.h> #include <math.h> #include "ParSHUM_solver.h" #include "ParSHUM_matrix.h" #include "ParSHUM_dense.h" #include "ParSHUM_enum.h" #include "ParSHUM_pivot_list.h" #include "ParSHUM_auxiliary.h" int main(int argc, char **argv) { ParSHUM_solver self; self = ParSHUM_solver_create(); ParSHUM_solver_parse_args(self, argc, argv, 1); ParSHUM_solver_read_matrix(self); ParSHUM_solver_init(self); printf("%d !\n",omp_get_thread_num()); #pragma omp parallel num_threads(self->exe_parms->nb_threads) //proc_bind(spread) { ParSHUM_verbose_trace_start_event(self->verbose, 0); sleep(1); ParSHUM_verbose_trace_stop_event(self->verbose); sleep(2); ParSHUM_verbose_trace_start_event(self->verbose, 1); sleep(1); ParSHUM_verbose_trace_stop_event(self->verbose); } ParSHUM_solver_finalize(self); ParSHUM_solver_destroy(self); return 0; }
yescrypt-opt_c.h
/*- * Copyright 2009 Colin Percival * Copyright 2013,2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ #ifdef __i386__ #warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. Enable at least SSE2 in the C compiler and use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (portability to older CPUs or testing)." #elif defined(__x86_64__) #warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. 
Use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (for testing only)." #endif #include <errno.h> #include <stdint.h> #include <stdlib.h> #include "sha256.h" #include "sysendian.h" #include "yescrypt.h" #include "yescrypt-platform_c.h" static inline void blkcpy(uint64_t * dest, const uint64_t * src, size_t count) { do { *dest++ = *src++; *dest++ = *src++; *dest++ = *src++; *dest++ = *src++; } while (count -= 4); } static inline void blkxor(uint64_t * dest, const uint64_t * src, size_t count) { do { *dest++ ^= *src++; *dest++ ^= *src++; *dest++ ^= *src++; *dest++ ^= *src++; } while (count -= 4); } typedef union { uint32_t w[16]; uint64_t d[8]; } salsa20_blk_t; static inline void salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout) { #define COMBINE(out, in1, in2) \ Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32); COMBINE(0, 0, 2) COMBINE(1, 5, 7) COMBINE(2, 2, 4) COMBINE(3, 7, 1) COMBINE(4, 4, 6) COMBINE(5, 1, 3) COMBINE(6, 6, 0) COMBINE(7, 3, 5) #undef COMBINE } static inline void salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout) { #define COMBINE(out, in1, in2) \ Bout->w[out * 2] = Bin->d[in1]; \ Bout->w[out * 2 + 1] = Bin->d[in2] >> 32; COMBINE(0, 0, 6) COMBINE(1, 5, 3) COMBINE(2, 2, 0) COMBINE(3, 7, 5) COMBINE(4, 4, 2) COMBINE(5, 1, 7) COMBINE(6, 6, 4) COMBINE(7, 3, 1) #undef COMBINE } /** * salsa20_8(B): * Apply the salsa20/8 core to the provided block. 
 */
static void
salsa20_8(uint64_t B[8])
{
	size_t i;
	salsa20_blk_t X;
#define x X.w

	/* B is kept in shuffled order; unshuffle before applying rounds. */
	salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X);

	/* 8 rounds = 4 double-rounds (one column round + one row round). */
	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns */
		x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

		x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

		x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

		x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

		x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

		x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

		x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
#undef R
	}
#undef x

	/* Feed-forward: re-shuffle the round output and add it word-wise to
	 * the original (still shuffled, so far unmodified) input in B. */
	{
		salsa20_blk_t Y;
		salsa20_simd_shuffle(&X, &Y);
		for (i = 0; i < 16; i += 4) {
			((salsa20_blk_t *)B)->w[i] += Y.w[i];
			((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1];
			((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2];
			((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3];
		}
	}
}

/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.  The
 * temporary space X must be 64 bytes.
 */
static void
blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r)
{
	size_t i;

	/* 1: X <-- B_{2r - 1} */
	blkcpy(X, &Bin[(2 * r - 1) * 8], 8);

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < 2 * r; i += 2) {
		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		/* even-indexed Y blocks land in the first half of Bout */
		blkcpy(&Bout[i * 4], X, 8);

		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8 + 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		/* odd-indexed Y blocks land in the second half of Bout */
		blkcpy(&Bout[i * 4 + r * 8], X, 8);
	}
}

/* These are tunable */
#define S_BITS 8
#define S_SIMD 2
#define S_P 4
#define S_ROUNDS 6

/* Number of S-boxes.  Not tunable, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable on their own. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD)
#define S_P_SIZE (S_P * S_SIMD)
#define S_MIN_R ((S_P * S_SIMD + 15) / 16)

/**
 * pwxform(B):
 * Transform the provided block using the provided S-boxes.
 */
static void
block_pwxform(uint64_t * B, const uint64_t * S)
{
	/* View B as S_P parallel lanes of S_SIMD 64-bit words each. */
	uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B;
	/* The two S-boxes live back to back in S. */
	const uint8_t *S0 = (const uint8_t *)S;
	const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD);
	size_t i, j;
#if S_SIMD > 2
	size_t k;
#endif

	for (j = 0; j < S_P; j++) {
		uint64_t *Xj = X[j];
		uint64_t x0 = Xj[0];
#if S_SIMD > 1
		uint64_t x1 = Xj[1];
#endif

		for (i = 0; i < S_ROUNDS; i++) {
			/* S_MASK2 extracts an S-box byte offset from each
			 * 32-bit half of x0; the halves index S0 and S1. */
			uint64_t x = x0 & S_MASK2;
			const uint64_t *p0, *p1;

			p0 = (const uint64_t *)(S0 + (uint32_t)x);
			p1 = (const uint64_t *)(S1 + (x >> 32));

			/* 32x32 -> 64 multiply, then add/xor S-box words. */
			x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0;
			x0 += p0[0];
			x0 ^= p1[0];

#if S_SIMD > 1
			x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1;
			x1 += p0[1];
			x1 ^= p1[1];
#endif

#if S_SIMD > 2
			for (k = 2; k < S_SIMD; k++) {
				x = Xj[k];

				x = (uint64_t)(x >> 32) * (uint32_t)x;
				x += p0[k];
				x ^= p1[k];

				Xj[k] = x;
			}
#endif
		}

		Xj[0] = x0;
#if S_SIMD > 1
		Xj[1] = x1;
#endif
	}
}

/**
 * blockmix_pwxform(Bin, Bout, S, r):
 * Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 *
 * S lacks const qualifier to match blockmix_salsa8()'s prototype, which we
 * need to refer to both functions via the same function pointers.
 */
static void
blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r)
{
	size_t r1, r2, i;

	/* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */
	r1 = r * 128 / (S_P_SIZE * 8);

	/* X <-- B_{r1 - 1} */
	blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE);

	/* X <-- X \xor B_i */
	blkxor(Bout, Bin, S_P_SIZE);

	/* X <-- H'(X) */
	/* B'_i <-- X */
	block_pwxform(Bout, S);

	/* for i = 0 to r1 - 1 do */
	for (i = 1; i < r1; i++) {
		/* X <-- X \xor B_i */
		blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE],
		    S_P_SIZE);
		blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE);

		/* X <-- H'(X) */
		/* B'_i <-- X */
		block_pwxform(&Bout[i * S_P_SIZE], S);
	}

	/* Handle partial blocks: copy any tail of Bin that r1 whole
	 * pwxform blocks did not cover. */
	if (i * S_P_SIZE < r * 16)
		blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE],
		    r * 16 - i * S_P_SIZE);

	i = (r1 - 1) * S_P_SIZE / 8;

	/* Convert 128-byte blocks to 64-byte blocks */
	r2 = r * 2;

	/* B'_i <-- H(B'_i) */
	salsa20_8(&Bout[i * 8]);
	i++;

	for (; i < r2; i++) {
		/* B'_i <-- H(B'_i \xor B'_{i-1}) */
		blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8);
		salsa20_8(&Bout[i * 8]);
	}
}

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint64_t
integerify(const uint64_t * B, size_t r)
{
/*
 * Our 64-bit words are in host byte order, and word 6 holds the second 32-bit
 * word of B_{2r-1} due to SIMD shuffling.  The 64-bit value we return is also
 * in host byte order, as it should be.
 */
	const uint64_t * X = &B[(2 * r - 1) * 8];
	uint32_t lo = X[0];
	uint32_t hi = X[6] >> 32;
	return ((uint64_t)hi << 32) + lo;
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be even and
 * no smaller than 2.
 */
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* With S-boxes present, use the pwxform mix; otherwise classic
	 * scrypt's salsa20/8 BlockMix.  Both share one prototype. */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;
	uint64_t * X = V;
	uint64_t * Y = &XY[s];
	/* Z is the blockmix scratch: the S-boxes when in pwxform mode,
	 * otherwise 64 bytes at the end of XY. */
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* NOTE(review): this inner test repeats the (VROM_mask & 1)
		 * condition already established above, so it is always true
		 * here — kept as-is to preserve the original structure. */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			/* n tracks p2floor(i) for the Wrap() computation */
			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The value Nloop must be even.
 */
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags, uint64_t * V, uint64_t NROM,
    const yescrypt_shared_t * shared, uint64_t * XY, uint64_t * S)
{
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	/* Forcing bit 0 on makes the ((i + 1) & VROM_mask) == 1 test below
	 * behave consistently for odd steps. */
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);

				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
	uint64_t y;
	/* Clearing the lowest set bit repeatedly leaves the highest one. */
	while ((y = x & (x - 1)))
		x = y;
	return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage
 * XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is
 * required with OpenMP-enabled builds).  The value N must be a power of 2
 * greater than 1.
 */
static void
smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	size_t s = 16 * r;
	uint64_t Nchunk = N / p, Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the second-loop iteration count from the time factor t. */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint64_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Per-lane first loop: each of the p lanes fills its own V chunk. */
	for (i = 0; i < p; i++) {
		uint64_t Vchunk = i * Nchunk;
		uint64_t * Bp = &B[i * s];
		uint64_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
		uint64_t * XYp = XY;
#endif
		/* The last lane also takes the remainder of N. */
		uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
		if (Sp)
			/* Initialize this lane's S-boxes (pwxform disabled
			 * while doing so, to avoid self-reference). */
			smix1(Bp, 1, S_SIZE_ALL / 16,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Remaining second-loop iterations read V across all lanes, so run
	 * them with YESCRYPT_RW cleared (read-only). */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint64_t * Bp = &B[i * s];
#ifdef _OPENMP
			uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
			uint64_t * XYp = XY;
#endif
			uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error.
 */
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint64_t * B, * V, * XY, * S;
	uint64_t sha256[4];

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) {
		errno = EINVAL;
		return -1;
	}
#if S_MIN_R > 1
	if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) {
		errno = EINVAL;
		return -1;
	}
#endif
	if ((p > SIZE_MAX / ((size_t)256 * r + 64)) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > SIZE_MAX / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
	if (N > UINT64_MAX / ((uint64_t)t + 1)) {
		errno = EFBIG;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) {
		errno = ENOMEM;
		return -1;
	}

	/* A ROM, when present, must be a power-of-2 number of blocks and
	 * only makes sense together with YESCRYPT_RW. */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (uint64_t *)local->aligned;
		need = 0;
	}
	/* The additions below are overflow-checked (need < addend). */
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r + 64;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL * sizeof(*S);
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint64_t *)tmp.aligned;
		XY = (uint64_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* Carve B, V, XY out of the single region, in that order. */
		B = (uint64_t *)local->aligned;
		V = (uint64_t *)((uint8_t *)B + B_size);
		XY = (uint64_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint64_t *)((uint8_t *)XY + XY_size);

	/* Any non-classic-scrypt mode (t or any flag set) pre-hashes the
	 * password with SHA-256. */
	if (t || flags) {
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, passwd, passwdlen);
		SHA256_Final((uint8_t *)sha256, &ctx);
		passwd = (uint8_t *)sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1,
	    (uint8_t *)B, B_size);

	if (t || flags)
		blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0]));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags,
			    &V[(size_t)16 * r * i * N],
			    NROM, shared,
			    &XY[((size_t)32 * r + 8) * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, (uint8_t *)B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX ctx;
			HMAC_SHA256_Init(&ctx, buf, buflen);
			/* NOTE(review): upstream yescrypt uses the label
			 * "Client Key" here; this variant deliberately uses a
			 * different 10-byte label, which changes the final
			 * hash — do not "fix" without breaking compatibility
			 * with existing hashes. */
			HMAC_SHA256_Update(&ctx, "PPTPPubKey", 10);
			HMAC_SHA256_Final((uint8_t *)sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, (uint8_t *)sha256, sizeof(sha256));
			SHA256_Final(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
main_2.c
#include <omp.h>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* pi to full double precision; converts tunes (mux_, muy_) to phases. */
const double pi = 3.14159265358979323846;

/*
 * array_show(a, numPoints):
 * Print the numPoints x numPoints grid 'a' row by row (debug helper).
 */
void array_show(int32_t *a, int32_t numPoints)
{
	for (int32_t i = 0; i < numPoints; ++i) {
		for (int32_t j = 0; j < numPoints; ++j)
			printf("%6d ", a[i*(numPoints) + j]);
		printf("\n");
	}
}

/*
 * set_XY_arrays(X, Y, numPoints, delta):
 * Collect the grid indices of all points (x, y) = (i*delta, j*delta) that
 * lie strictly inside the unit circle.  X receives the column index j and
 * Y the row index i of each such point.  Returns the number of points
 * stored (X and Y must have room for numPoints*numPoints entries).
 */
int32_t set_XY_arrays(int32_t *X, int32_t *Y, int32_t numPoints, double delta)
{
	int32_t size = 0;

	for (int32_t i = 0; i < numPoints; ++i) {
		for (int32_t j = 0; j < numPoints; ++j) {
			const double x = i * delta;
			const double y = j * delta;
			if (x*x + y*y < 1.0) {
				X[size] = j;
				Y[size] = i;
				size++;
			}
		}
	}
	return size;
}

/*
 * calculate(X, Y, Z, numPoints, size_XY, delta, mux_, muy_, b4, numThreads):
 * For each of the size_XY seed points, iterate a 2-D map (a rotation by
 * phase 2*pi*mux_/2*pi*muy_ in each plane followed by a cubic kick of
 * strength b4) until the orbit leaves the unit circle or 100001 turns have
 * been taken, and store the completed turn count (n - 1) in Z at the
 * point's grid index.
 *
 * Fixes vs. original:
 *  - All per-particle state (x0, px0, ..., n) is now declared inside the
 *    parallel loop body.  The original declared these at function scope,
 *    which makes them *shared* under "#pragma omp parallel for" (OpenMP
 *    default data-sharing rules) and produced a data race across threads.
 *  - The phases mux/muy are loop-invariant and are hoisted out of the loop.
 */
void calculate(int32_t *X, int32_t *Y, int32_t *Z, int32_t numPoints,
               int32_t size_XY, double delta, double mux_, double muy_,
               double b4, int32_t numThreads)
{
	const double mux = 2.0 * pi * mux_;
	const double muy = 2.0 * pi * muy_;

	#pragma omp parallel for schedule(dynamic) num_threads(numThreads)
	for (int32_t s = 0; s < size_XY; s++) {
		double x0 = X[s] * delta;
		double y0 = Y[s] * delta;
		double px0 = 0.0;
		double py0 = 0.0;
		int32_t n;

		for (n = 0; n <= 100000; n++) {
			if (x0*x0 + y0*y0 < 1.0) {
				/* Linear rotation in each plane... */
				double x1  =  x0 * cos(mux) + px0 * sin(mux);
				double px1 = -x0 * sin(mux) + px0 * cos(mux);
				double y1  =  y0 * cos(muy) + py0 * sin(muy);
				double py1 = -y0 * sin(muy) + py0 * cos(muy);
				/* ...followed by the nonlinear kick. */
				double px2 = px1 + b4 * (x1*x1*x1 - 3.0*x1*y1*y1);
				double py2 = py1 - b4 * (y1*y1*y1 - 3.0*x1*x1*y1);
				x0 = x1;  y0 = y1;
				px0 = px2; py0 = py2;
			} else
				break;
		}
		/* n - 1 = completed turns before loss (100000 if it survives). */
		Z[numPoints * Y[s] + X[s]] = n - 1;
	}
}

int main(int argc, char const *argv[])
{
	/* Fix vs. original: validate argc before dereferencing argv[1..2]. */
	if (argc < 3) {
		fprintf(stderr, "usage: %s numThreads numPoints\n", argv[0]);
		return 1;
	}

	char *p1, *p2;
	int32_t numThreads = (int32_t) strtol(argv[1], &p1, 10); // first arg
	int32_t numPoints  = (int32_t) strtol(argv[2], &p2, 10); // second arg
	if (numThreads < 1 || numPoints < 2) {
		fprintf(stderr, "numThreads must be >= 1, numPoints >= 2\n");
		return 1;
	}

	double delta = 1.0 / (numPoints - 1);
	const double b4 = 0.5;
	const double mux = 0.32;
	const double muy = 0.32;

	int32_t *X = malloc((size_t)numPoints * numPoints * sizeof(int32_t));
	int32_t *Y = malloc((size_t)numPoints * numPoints * sizeof(int32_t));
	int32_t *Z = calloc((size_t)numPoints * numPoints,
	                    sizeof(int32_t)); // set zero
	/* Fix vs. original: check allocations before use. */
	if (!X || !Y || !Z) {
		fprintf(stderr, "out of memory\n");
		free(X);
		free(Y);
		free(Z);
		return 1;
	}

	int32_t size_XY = set_XY_arrays(X, Y, numPoints, delta);

	calculate(X, Y, Z, numPoints, size_XY, delta, mux, muy, b4, numThreads);
	//array_show(Z, numPoints);

	free(X);
	free(Y);
	free(Z);
	/* Fix vs. original: removed "free(K);" — K was never declared, so the
	 * original did not even compile. */
	return 0;
}
cc_conv2d.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#include "cc_assert.h"
#include "cc_array.h"
#include "cc_basic.h"
#include "cc_fmap2d.h"
#include "cc_pad2d.h"
#include "cc_tsrmgr.h"

#include "util_list.h"
#include "util_log.h"

#include "cc_conv2d.h"

#include "global_fn_cfg.h"
extern fn_conv2d _conv2d;

/*
 * cc_conv2d_shape_calc(i, k, s, p):
 * Standard convolution output-extent formula: floor((i - k + 2p) / s) + 1,
 * with i = input extent, k = kernel extent, s = stride, p = padding.
 */
cc_ssize cc_conv2d_shape_calc(
	cc_ssize i, cc_ssize k, cc_ssize s, cc_ssize p)
{
	return (cc_ssize)((i - k + 2 * p) / s) + 1;
}

/*
 * cc_conv2d(inp, kernel, bias, s, p, off, name):
 * Full 2-D convolution: each of the O output channels accumulates the
 * single-channel convolutions of all I input channels.  Per-thread scratch
 * buffers hold each partial channel result before it is added into the
 * output, so the OpenMP loop over output channels is race-free.
 */
cc_tensor_t *cc_conv2d(const cc_tensor_t *inp,
	const cc_tensor_t *kernel, const cc_tensor_t *bias,
	cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
	cc_uint8 *omp_out_buf = NULL;
	cc_tensor_t *oup = NULL;
	const cc_tensor_t *inp_pad;
	cc_ssize o_ch_size, p_ch_mem_size, o_ch_mem_size,
		k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
	char pad_name[CC_TSR_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	/* input channel count must match the kernel's I dimension */
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C] -
		kernel->shape[CC_CONV2D_KERNEL_I]);
#endif
	/* Apply zero/constant padding first when requested. */
	if (p) {
		sprintf(pad_name, "%s%s",
			inp->name, CC_CONV2D_PAD_NAME_SURFFIX);
		inp_pad = cc_pad2d(inp, p, off, pad_name);
	} else
		inp_pad = inp;
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_H],
			kernel->shape[CC_CONV2D_KERNEL_H], s, p);
		shape[CC_CNN2D_SHAPE_W] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_W],
			kernel->shape[CC_CONV2D_KERNEL_W], s, p);
		oup = cc_create(shape, *inp->dtype, name);
	}
	/* Per-channel element counts and byte sizes used for indexing. */
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	p_ch_mem_size = inp_pad->shape[CC_CNN2D_SHAPE_W] *
		inp_pad->shape[CC_CNN2D_SHAPE_H] *
		cc_dtype_size(*inp->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
	k_mem_size = k_ch_mem_size * kernel->shape[CC_CONV2D_KERNEL_I];
	num_omp_threads = 1;
#ifdef ENABLE_OPENMP
	num_omp_threads = omp_get_max_threads();
#endif
	/* One scratch output channel per OpenMP thread. */
	cc_assert_alloc(omp_out_buf =
		(cc_uint8*)malloc(o_ch_mem_size * num_omp_threads));
#ifdef AUTO_TSRMGR
	/* NOTE(review): without AUTO_TSRMGR, oup->data is not zeroed here
	 * before accumulation — presumably cc_create() returns zeroed
	 * storage; verify. */
	memset(oup->data, 0, list_getlen(oup->container, CC_TENSOR_DATA));
#endif
#ifdef ENABLE_OPENMP
	#pragma omp parallel for private(i, j)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		for (j = 0; j < kernel->shape[CC_CONV2D_KERNEL_I]; ++j) {
#ifdef ENABLE_OPENMP
			/* conv input channel j into this thread's scratch */
			_conv2d((inp_pad->data + j * p_ch_mem_size),
				omp_out_buf +
					omp_get_thread_num() * o_ch_mem_size,
				inp_pad->shape[CC_CNN2D_SHAPE_W],
				inp_pad->shape[CC_CNN2D_SHAPE_H], s, s,
				(kernel->data + (k_mem_size * i) +
					k_ch_mem_size * j),
				kernel->shape[CC_CONV2D_KERNEL_W],
				*kernel->dtype);
			/* accumulate into output channel i */
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf +
					omp_get_thread_num() * o_ch_mem_size,
				*oup->dtype);
#else
			_conv2d((inp_pad->data + p_ch_mem_size * j),
				omp_out_buf,
				inp_pad->shape[CC_CNN2D_SHAPE_W],
				inp_pad->shape[CC_CNN2D_SHAPE_H], s, s,
				(kernel->data + (k_mem_size * i) +
					k_ch_mem_size * j),
				kernel->shape[CC_CONV2D_KERNEL_W],
				*kernel->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf, *oup->dtype);
#endif
		}
	}
	free(omp_out_buf);
#ifndef AUTO_TSRMGR
	if (p)
		cc_free((cc_tensor_t*)inp_pad);
#endif
	if (bias)
		oup = cc_fmap2d_bias(oup, bias, oup->name);
	return oup;
}

/*
 * cc_dw_conv2d(inp, kernel, bias, s, p, off, name):
 * Depthwise 2-D convolution: channel i of the output is input channel i
 * convolved with kernel channel i (no cross-channel accumulation), so each
 * loop iteration writes a disjoint output channel and needs no scratch.
 */
cc_tensor_t *cc_dw_conv2d(cc_tensor_t *inp,
	const cc_tensor_t *kernel, const cc_tensor_t *bias,
	cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
	cc_tensor_t *inp_pad, *oup = NULL;
	cc_ssize o_ch_size, p_ch_mem_size,
		o_ch_mem_size, k_ch_mem_size, i;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
	char pad_name[CC_TSR_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	/* depthwise: input channels must equal the kernel's O dimension */
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C] -
		kernel->shape[CC_CONV2D_KERNEL_O]);
#endif
	if (p) {
		sprintf(pad_name, "%s%s",
			inp->name, CC_CONV2D_PAD_NAME_SURFFIX);
		inp_pad = cc_pad2d(inp, p, off, pad_name);
	} else
		inp_pad = inp;
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_H],
			kernel->shape[CC_CONV2D_KERNEL_H], s, p);
		shape[CC_CNN2D_SHAPE_W] = cc_conv2d_shape_calc(
			inp->shape[CC_CNN2D_SHAPE_W],
			kernel->shape[CC_CONV2D_KERNEL_W], s, p);
		oup = cc_create(shape, *inp->dtype, name);
	}
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	p_ch_mem_size = inp_pad->shape[CC_CNN2D_SHAPE_W] *
		inp_pad->shape[CC_CNN2D_SHAPE_H] *
		cc_dtype_size(*inp->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
#ifdef AUTO_TSRMGR
	memset(oup->data, 0, list_getlen(oup->container, CC_TENSOR_DATA));
#endif
#ifdef ENABLE_OPENMP
	#pragma omp parallel for private(i)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		_conv2d((inp_pad->data + i * p_ch_mem_size),
			oup->data + i * o_ch_mem_size,
			inp_pad->shape[CC_CNN2D_SHAPE_W],
			inp_pad->shape[CC_CNN2D_SHAPE_H], s, s,
			kernel->data + (k_ch_mem_size * i),
			kernel->shape[CC_CONV2D_KERNEL_W],
			*kernel->dtype);
	}
	if (!bias){
#ifndef AUTO_TSRMGR
		if (p)
			cc_free(inp_pad);
#endif
		return oup;
	} else {
		oup = cc_fmap2d_bias(oup, bias, oup->name);
	}
#ifndef AUTO_TSRMGR
	if (p)
		cc_free(inp_pad);
#endif
	return oup;
}

/*
 * cc_pw_conv2d(inp, kernel, bias, name):
 * Pointwise (1x1) 2-D convolution: each output channel is a weighted sum
 * of whole input channels, implemented as scale (cc_array_mul_by) plus
 * accumulate (cc_array_add_ew).  Output H/W equal input H/W; no padding
 * or stride parameters apply.
 */
cc_tensor_t *cc_pw_conv2d(cc_tensor_t *inp,
	const cc_tensor_t *kernel, const cc_tensor_t *bias, const char *name)
{
	cc_uint8 *omp_out_buf = NULL;
	cc_tensor_t *oup = NULL;
	cc_ssize o_ch_size, o_ch_mem_size,
		k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
	cc_ssize shape[CC_CNN2D_SHAPE] = {0};
#ifdef ENABLE_CC_ASSERT
	cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
	cc_assert_zero(*inp->dtype - *kernel->dtype);
	cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C] -
		kernel->shape[CC_CONV2D_KERNEL_I]);
#endif
#ifdef AUTO_TSRMGR
	oup = cc_tsrmgr_get(name);
#endif
	if (!oup) {
		shape[CC_CNN2D_SHAPE_C] = kernel->shape[CC_CONV2D_KERNEL_O];
		shape[CC_CNN2D_SHAPE_H] = inp->shape[CC_CNN2D_SHAPE_H];
		shape[CC_CNN2D_SHAPE_W] = inp->shape[CC_CNN2D_SHAPE_W];
		oup = cc_create(shape, *inp->dtype, name);
	}
	o_ch_size = oup->shape[CC_CNN2D_SHAPE_W] *
		oup->shape[CC_CNN2D_SHAPE_H];
	o_ch_mem_size = o_ch_size * cc_dtype_size(*oup->dtype);
	k_ch_mem_size = kernel->shape[CC_CONV2D_KERNEL_W] *
		kernel->shape[CC_CONV2D_KERNEL_H] *
		cc_dtype_size(*kernel->dtype);
	k_mem_size = k_ch_mem_size * kernel->shape[CC_CONV2D_KERNEL_I];
	num_omp_threads = 1;
#ifdef ENABLE_OPENMP
	num_omp_threads = omp_get_max_threads();
#endif
	/* One scratch output channel per OpenMP thread. */
	cc_assert_alloc(omp_out_buf =
		(cc_uint8*)malloc(o_ch_mem_size * num_omp_threads));
#ifdef AUTO_TSRMGR
	memset(oup->data, 0, list_getlen(oup->container, CC_TENSOR_DATA));
#endif
#ifdef ENABLE_OPENMP
	#pragma omp parallel for private(i, j)
#endif
	for (i = 0; i < kernel->shape[CC_CONV2D_KERNEL_O]; ++i) {
		for (j = 0; j < kernel->shape[CC_CONV2D_KERNEL_I]; ++j) {
#ifdef ENABLE_OPENMP
			/* scale input channel j by kernel weight (i, j) */
			cc_array_mul_by(
				omp_out_buf +
					omp_get_thread_num() * o_ch_mem_size,
				o_ch_size, inp->data + o_ch_mem_size * j,
				kernel->data + k_mem_size * i +
					k_ch_mem_size * j, *oup->dtype);
			/* accumulate into output channel i */
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf +
					omp_get_thread_num() * o_ch_mem_size,
				*oup->dtype);
#else
			cc_array_mul_by(omp_out_buf, o_ch_size,
				inp->data + o_ch_mem_size * j,
				kernel->data + k_mem_size * i +
					k_ch_mem_size * j, *oup->dtype);
			cc_array_add_ew(oup->data + o_ch_mem_size * i,
				o_ch_size, oup->data + o_ch_mem_size * i,
				omp_out_buf, *oup->dtype);
#endif
		}
	}
	free(omp_out_buf);
	if (!bias)
		return oup;
	else
		oup = cc_fmap2d_bias(oup, bias, oup->name);
	return oup;
}
api.c
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu

// End-to-end test of the OpenMP device-memory API under the
// unified_shared_memory requirement: omp_target_alloc / omp_target_memcpy,
// omp_target_is_present, and omp_target_associate_ptr /
// omp_target_disassociate_ptr.  Program output is validated by the
// FileCheck directives interleaved below; do not reorder them.

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

// NOTE(review): declaration uses int64_t but <stdint.h> is not included
// directly; presumably it arrives transitively via the headers above --
// confirm on all RUN targets.
extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

#pragma omp requires unified_shared_memory

// Number of elements in each test array.
#define N 1024

// Initialize the three work arrays: A (result) zeroed, B all ones,
// C the ramp 0..N-1.  Expected device result is A[i] = B[i]+C[i]+1 = i+2.
void init(int A[], int B[], int C[]) {
  for (int i = 0; i < N; ++i) {
    A[i] = 0;
    B[i] = 1;
    C[i] = i;
  }
}

int main(int argc, char *argv[]) {
  const int device = omp_get_default_device();

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  // (The literal 8 is the runtime's flag value for the unified_shared_memory
  // requirement declared by the pragma above -- verify against the RTL enum.)
  __tgt_register_requires(8);

  // The CHECK below documents that this runtime reports -10 as the
  // host ("initial") device number.
  // CHECK: Initial device: -10
  printf("Initial device: %d\n", omp_get_initial_device());

  //
  // Target alloc & target memcpy
  //
  int A[N], B[N], C[N];

  // Init
  init(A, B, C);

  int *pA, *pB, *pC;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];

  // Raw device-side buffers for the three arrays.
  int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);

  // CHECK: omp_target_alloc succeeded
  printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");

  // Copy the inputs host -> device.  Return codes are intentionally ignored;
  // correctness is established by the element-wise check further down.
  omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());
  omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());

  // Compute d_A[i] = d_B[i] + d_C[i] + 1 on the device via device pointers.
#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
  {
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < N; i++) {
      d_A[i] = d_B[i] + d_C[i] + 1;
    }
  }

  // Copy the result device -> host.
  omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
                    device);

  // CHECK: Test omp_target_memcpy: Succeeded
  // Expected per element: A[i] = 1 + i + 1 = i + 2.
  int fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_memcpy: Failed\n");
  } else {
    printf("Test omp_target_memcpy: Succeeded\n");
  }

  //
  // target_is_present and target_associate/disassociate_ptr
  //
  init(A, B, C);

  // Associate the host arrays B and C with the device buffers allocated
  // above, so subsequent map clauses reuse them instead of allocating.
  // CHECK: B is not present, associating it...
  // CHECK: omp_target_associate_ptr B succeeded
  if (!omp_target_is_present(B, device)) {
    printf("B is not present, associating it...\n");
    int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: C is not present, associating it...
  // CHECK: omp_target_associate_ptr C succeeded
  if (!omp_target_is_present(C, device)) {
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // Inside the data region: B and C are present (associated above),
  // A has no device copy yet.
  // CHECK: Inside target data: A is not present
  // CHECK: Inside target data: B is present
  // CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
  {
    printf("Inside target data: A is%s present\n",
           omp_target_is_present(A, device) ? "" : " not");
    printf("Inside target data: B is%s present\n",
           omp_target_is_present(B, device) ? "" : " not");
    printf("Inside target data: C is%s present\n",
           omp_target_is_present(C, device) ? "" : " not");

    // Same computation as before, but through mapped host arrays.
#pragma omp target map(from : A) device(device)
    {
#pragma omp parallel for schedule(static, 1)
      for (int i = 0; i < N; i++)
        A[i] = B[i] + C[i] + 1;
    }
  }

  // Undo the associations so the device buffers can be freed cleanly.
  // CHECK: B is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr B succeeded
  // CHECK: C is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr C succeeded
  if (omp_target_is_present(B, device)) {
    printf("B is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(B, device);
    printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }
  if (omp_target_is_present(C, device)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, device);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: Test omp_target_associate_ptr: Succeeded
  fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf("Test omp_target_associate_ptr: Succeeded\n");
  }

  omp_target_free(d_A, device);
  omp_target_free(d_B, device);
  omp_target_free(d_C, device);

  printf("Done!\n");

  return 0;
}
GB_binop__min_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This translation unit instantiates every kernel for the MIN binary operator
// on double (fp64).  Each function body is a thin wrapper that defines the
// type/operator macros below and then #includes a shared template.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__min_fp64
// A.*B function (eWiseMult):       GB_AemultB__min_fp64
// A*D function (colscale):         GB_AxD__min_fp64
// D*A function (rowscale):         GB_DxB__min_fp64
// C+=B function (dense accum):     GB_Cdense_accumB__min_fp64
// C+=b function (dense accum):     GB_Cdense_accumb__min_fp64
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__min_fp64
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__min_fp64
// C=scalar+B                       GB_bind1st__min_fp64
// C=scalar+B'                      GB_bind1st_tran__min_fp64
// C=A+scalar                       GB_bind2nd__min_fp64
// C=A'+scalar                      GB_bind2nd_tran__min_fp64

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = fmin (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = fmin (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" expands to nothing usable on purpose: no CBLAS gateway for MIN.)
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out via the GxB_NO_* flags in GB_control.h)
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__min_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable; the block above always returns (generated code).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Free the per-matrix ek_slice workspaces; referenced by the templates below.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__min_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__min_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__min_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = fmin (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__min_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = fmin (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = fmin (x, aij) ;                   \
}

GrB_Info GB_bind1st_tran__min_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use (identical here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = fmin (aij, y) ;                   \
}

GrB_Info GB_bind2nd_tran__min_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pfx_ng_fmt_plug.c
/* * This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pfx_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_pfx_ng); #else #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "hmac_sha.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "twofish.h" #include "sha.h" #include "loader.h" #include "simd-intrinsics.h" #include "pkcs12.h" #include "memdbg.h" #define FORMAT_LABEL "pfx-ng" #define FORMAT_NAME "" #define ALGORITHM_NAME "PKCS12 PBE (.pfx, .p12) (SHA-1 to SHA-512) " SHA1_ALGORITHM_NAME // I could not get openssl to use passwords > 48 bytes, so we will cut support at this length. #define PLAINTEXT_LENGTH 48 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(ARCH_WORD_32) #define BINARY_SIZE 20 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #if !defined(SIMD_COEF_32) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #else // FIXME. We have to handle this in some other manner (in init). We need to // find the LCM of all possible 'groups'. So if we have 2 8 and 24 as our // groups, this count needs to be 24. 
If it was 2 8 24 and 32, then we would // need min/max keys to be 96 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #endif #define FORMAT_TAG "$pfxng$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define MAX_DATA_LENGTH 8192 // XXX ensure this is large enough static struct fmt_tests tests[] = { {"$pfxng$1$20$2048$8$e861d3357729c35f$308206513082032f06092a864886f70d010706a08203203082031c0201003082031506092a864886f70d010701301c060a2a864886f70d010c0103300e04086c933ea5111fd24602020800808202e83c56ad18c45e54aaca4d170750cfbfb3059d6cf161e49d379eab15e722216cb479eee8da7b6e6ffc89e01fbf30f4eb5e1b88ca146c166c700a68473d25a0979344cc60d1e58230a12d24b8be6e9174d3afecdf111cd7d96527831ac9c8f4bf3817cda021f34b61899f2a75fe511e8dedfb70367fa9902d2d3e500f853cc5a99ec8672a44713d24ae49382a20db6349bc48b23ad8d4be3aa31ba7e6d720590b5e4f6b0b5d84b7789ae9da7a80bfa3c27e507fc87e7bc943cff967db6b76f904ac52c1db5cfe9915fa3493cd42b8db6deae62bc01593e34bc8598f27a24cdfd242701ff72d997f959f3a933ab5a2762df33849c116715b78cb0d83267aff913619cbbdf003e13318e4b188a8a4f851b9f59ae2c71ab215c565f7872e5d54c06f92d6f59eaf19d95f9b4526d52d289cd17bc0c2079f9f13c20a70a566773d90ca6d888386d909b6362cb79e15cf547dceab1fe793c577b70f72463969f7b416fb5a6228053363558df18588b53406343ab320a1bbf1757b67ef8e3075f44dee4521f4a461d37ea894c940bc87f9bd33276f2843ff5922fd8e61d22a8619ad23154880fd7d957c0f151458fc4f686d96695a823b08c1795daaf79e41118a3c57ee065a693853a9c4b2004440662f51d63bb9973dc4bb8c541d424416c57d01a825be4d31dab7c7f4b2b738e4bbfdda1e3d3b95e026dadee4dfe155c0f4a24991693f679b452516bc19eab7cf7eb41b476358583d46630e8cda55974b8fcbe25b93e91e73f584a913149137c1c20d13f38826d8dba9bcf5504b8cee77e20a19d6fb050e9213b8aeb11c26a871c600701aea352ba2dcea15d8010d25034f64aa488b580b8282d226f8203bba6aa424b0a25bcceb9c7c718b6c276022d988ca063d2e88350d68903f95aa3265b44d909c07fa9477a5dfcfe3b5ed49b789d6e1c13aca012630343021dbc0c0f17dae6688eae495b76d21be49ced2c2e98e1068d8725d8a581958fb2530871dff1b3f910ae8beb3bc07bfb4b1d2d73fc5
d440dc9bcd32ba656c32e357051bef3082031a06092a864886f70d010701a082030b0482030730820303308202ff060b2a864886f70d010c0a0102a08202a6308202a2301c060a2a864886f70d010c0103300e0408749558ace83617660202080004820280ef790b9cd427ec99a350a6e3afb1727cf3dd859d5377897805a7093e1ca42ab8cccc6c52d2b86d61ed55b5bd743fb2a4ec556b438933a9d97a55e5ad1fb3f9967e550be3d708feb5c7287e31afed165a4a91bd5a80292a1e061f97a8c11339963843348badf3fd898e89fd92bda5ad0195d8d4f75e7bce9f0518eeb85365860cd32ad5cea0958efef02bfb74aec0af0765729dae079f5eb08b099d3b06a9b9c6cd6f1e1e4170208ebec3c61ae3421e90cef0f2b5cd2187e43cc4ceecf4aec06340f886efb94f517e578d13659392246a69505de3914b719fba74709ef0f03f010429f899dbddab950f6e58462b2fe2663986a5e0c8ff235e89bca3bb6e41fcd602a0277a83822ac1a14101c83fd1cafdc45c1980ecf54ef092deb2fea736b428158e0847256fc1211f94ea8075145be5a5fb26206e125d55f45500844f1a83f063d0be19b60427dadbd89109bb9ee31a1ac79c863204e8e80c044b8b6bc45c756c26be514e4270a293faf4608065a27b4a51253cb9f831614d5c7f25ec1d4e36063e68e4e405c1f4deb98a786c57a376609441f2dcbe6393487b884624570f6cbb02b53f58ea4acb0faedd2931293dc87664a0c589322480686f6613ffb794c3b3b1872cd7a418712a35666b53bd8383f2e7aa6e8a9e20dd3d46cc3aaaaf17841732dde708ba5611ebcc8777fb3f7b65f2cf95992fdf4f5a17ddf01f3ebe5fb6c9cd58cb74553865cbec3c9d391dcc3e96e654faf7be7fdc8d5fb5dff98799e740147d2ca4b6df47560a4a20bd8f30cf5b495f4e919c9efad3aa59491a3e2ba4e53606e2016ce13e8271e70ccd5b57eec99a8604caf5997e648f3eb541769267f9cdf76aa84917ebd8a1f60a973ed22cca9fa0d3589bb77dafed82ea4f8cd19d3146301f06092a864886f70d01091431121e10006f00700065006e00770061006c006c302306092a864886f70d01091531160414a38a6be4b090be5e29259879b75e0e482f4a4dd8$a790274918578289d80aa9fd0d526923f7b8f4d4", "openwall"}, 
{"$pfxng$1$20$1024$20$456a2344e138862de7ad2e0b274952ef566e2b63$308209cb3082057806092a864886f70d010701a082056904820565308205613082055d060b2a864886f70d010c0a0102a08204fa308204f63028060a2a864886f70d010c0103301a0414e9a49f4190a3084e02ceba2f049303750f6646da02020400048204c8cd40bb89c287b9fe70a88825e33a648c76aa1b35d93131d445e48943ee50ff8a0aee6a0483a289fbacf21290a8553e3414ea6bd6b305407d709bbaf915a99430c998d9ba68e71f4036d42feb386061d645433390658df91bd4e9750a39f9288f7cf8001e2adc8e4d7480f1a5e2d63799df20d9eb956f86b33330ec2c206b1ae47cf54d9cf2cdd970664c251e64cc725456e2c14506cfd7d9ff1d2894a50093fff4f29d5967a0f788ed707ade93cb3ad7e87d96dad844d2037f4d5e863ec5170c0f1d4514d752a266cd4db49b63c5d86646e54a68187ddc99b00499286f79e2e7c54e30d3a1b1f035d7113180d072c7218d399f8b5427dc2d6fcb42518bd6bb97f74c97ea2358ef39fb176397fe7729cd5c3a474423f0a0e74a91c77bb27b24f82463081fed53bdf216840b2c60482846010b654e2c74db4abfb07936e0cc9d0d133ac7a4baa03091de25f6eea70d85fe9376349731ecc03fe437175101fd6698929f43a94835c6453b68335f478cfa1fab1ddf0236570ca5a07cebf1aa3c36d7804654a5eac8328377abba3b81627fcac7f1dbdb56ba1f0f861af9967c5d245459a81891fb5dd833f0bca633eb616cf10397b295d857c63501e85fb9f11f1fd3dd80baac425ecf0efa012817ca9b23e06575a3942613fad67b4bda4fabfd29bd1897b0623d6d47ec000bd656f5b7c78b9a4808ac022524b17a8df676b86dc29b6d008d09cb1148110bd07464c071504d7dae5803602247da1e4cd5d490771322d7eb568d0ad0293f4d2626ac0f60f568a92eccd097f6d5247e043b7cdb52ddfef0516e7053fb42b7d1b16564f1c862c1bf45436290a5dab1f0e90b24bdd4433ce0cbcc7b0eafc445dcc6fe8a52e606d3977ce6d9e44f037ea8dbf36bce63a877aaafde13b1bb5005856d315f30fd4feaf26ef8eeef899802aa2442364c147b074c64878a696a1f2cadd9bacb187b62c239c16f163d6c44e157dd8daa4610142eb40dadbc3405c4ade7d127db20bc4384bd1d4c2a2a5dc907aa0468c2485654bceeee3d4011d74e6e85ed88811ccf1cd6b3d5540c5709b8e14fb9e610b552502343ec739e8c9c6d6459062f76275de1fa1b24ed8a9924ea9176dfb89520b7fbec9e9968bd0320afc513e560966b524a82ef5a206f1823742e820bbbe6dca6b0a33c8f04208376bfd01f049f666c735b1efe2550a8601b1839bf045c56a9772a3e25235
d2fb61f9007713ff57ae47f6335a44e6730bdaaebe833996aaaa78138ddb7d8719570a429debb8183fbd07f71a037335ec5b1d40c62f7163b85dc71d8db536c9092f155429b65ea81f8ff3c7892ebf881c107ea2c167df47d044ae7ed3fb5328d673753450c82d7049dfeaf1dde821a0ee0d6676a1656584cdbd4532f8d2493ea4794d88acacb147f19ca15777a67fe5031991ebc45ea43e87574f9d2f52de0722d6cc7f5b7a378a461148f1f7c5ee8bc7c7ae4fe80b4eed13b35d16906a084120c645812db0bd70e419c004512f284ab7635f17ee2ecc728aef2cda256b86fb4cc9d3e21736249735962d6ccd307a67fdbdb0815184f116eb1747de19449c6fb9410cb669fa2a3f2ab5ca16c3cca918555b583f61f2126aa0895ccdac7a5604ca1e84a76c15c508d620bb9037e5e5acf97e94438a059bc771d84dc1f63fd3f4780274a2f0a03f9b09a0cf4638e0c317f6ebb24f9062fe8c7023d4c06f3c67c9ac2008e8da33150302b06092a864886f70d010914311e1e1c006d0079005f00630065007200740069006600690063006100740065302106092a864886f70d0109153114041254696d6520313334303937373036353139303082044b06092a864886f70d010706a082043c308204380201003082043106092a864886f70d0107013028060a2a864886f70d010c0106301a04147d79e2d2b2986ea4d929b3ba8b956739a393b00802020400808203f82c0ebc2a236e5ffc4dff9e02344449f642fdf3b16da9b2e56d5a5e35f323b23b8ff915fbaf2ff70705465170ccd259a70bb1cde9f76e593f9a7a0d4764806dad2fa5c3b1ee2711e9dbbcaa874f8985f1b6c2ca1d55c919cf9e88aababe7826107cdb937e7cca57809b20a6351504ab688327e4df957a3c167772cf66aed6a2007ead81896465d4931efe7c3291a49761f1428c766fd82e1736218e90d9f8592475d164d9a79f3424cb6a543f7040d3f0dba6996d496f4f603b7d59527e5c9c89b3f96c55fa73b72385629cbd606cf9f88833db66bb1519dee62a0cd4989d93457fa1162b594b86bc7134c9aa530fe10d62b914f1818395f82d5224c3bc793a04b0ab41dc98694535f5bfbf2aa943d6c794f407e02248be842c55789091d1cc28bbfdf86bc1346142b057558ce1e64e38f8b2d7d68d539150f3de23f43d59637ae678f3687e69b52fdf46f54c32b84a658a2a69fb16da7ebb45ea84c9e38d6cedfc1227b86a6ea3094d0908d588213834192849fa5c25b2460bb22fdd9d9e317efaca646ea582ecb50f6a466f55ae38573afe904eadf42b6c596c8740dbf92cbd38c347624f3399ac2d20d0727f897f38417901dfdaa798631af8992fcad5d708882576036531d2deb867fe46d63921dc50b8c73fbc59586a861
d7ae47c2a5ff892e9dffc6d8e6e8161506819ebc020cfb7bc4c1708832d53f8cc864012ab8379a1323e23b0edb5ffe48a942411cef6197f5545ae6822a3096db972f96d4d200ba600a1e95595d4532e7a9861b233f71ff37ea3c19143c87dd6d4a3f3186a7693dc11067c7b4c967984d4bbbf9d88acacb1ff3ba4536ea265a0503865d86af408748fe8191119cd7b570b5352f190265d5d468e911ba0020b526d3892119fda21243568cfa638251c9044c91a88d2f8a05dd0d90088b0b79ac2a2ca263aa108160a7f6943ce709a02743afb6e4ec9a7f7535635f839c2baf938418accec3d5c1ad2bbcec69ab337155bd0bb1b45c7e16e32f251d4da7796f013d6d502581853da6ab9736382115141886c14512fb5ca22e3e9e20366257579eb4225a6a3716457b9b1c0df63cb71a34b888de021f3520d62e96675ea8767e23d55b50e9aa40babafe398f5482c83f8caa57d7ed3486ce7dedace7158067194892defe38af28c1695cd6f14a1ddae959541fab3b59e72c17d2a67d980c749ef00b1f61ece68d81c79b4ec4f4d9eeaad43895a0dc9d86f4d7fe114f01189b3db72ee92963d4403c3aca8bf6d60ef7ee7fcd8102b3247048b4d517cd0ab76a0f8d68d33733934cb35a8e40d7de70c4f166c453fda74553069c51dd33f6f513bb9ef0a983187fc7d896c668590577a4e269688cc7b9fbd1f3fe77d3f431cf002043c43e1cae82b22018931f1337ee276d49c19163a866ef10a64ac5b013db1cb1c$501f5cd8e454e44b6925715c4d2605a8d4ce70d0", "my_password"}, 
{"$pfxng$1$20$2048$8$c70bc3c11be46232$308205f9308202cf06092a864886f70d010706a08202c0308202bc020100308202b506092a864886f70d010701301c060a2a864886f70d010c0103300e0408aeab408a953dae400202080080820288eac5f49ac4a3c50ec87cfd7592cd19e7deafbd62f58eb68ec542bf073778bf238533fc1363ff41e87dc72e75d97fbbd9707ca0fa171216f5c5d56906efc96f15883138b31a151b40ae2d72e7d4310095f03c85d75672d983566db3cae50c59613a26b64d54fcaa5cd8c328854359868eae40e66c7f527ce213d3a8645d012afa3fbb9ddab6c6dd1bc3863cc2c0014380e606da2f7f7ede8ef1c8a35d48b4f150651387461cf1327f12629411b3b3f7b0d8e3dce9e03b5ef52b1cb911b469685b491ceec0276a6c3a2e64beab805fa67cea73c0ed6bd498d563a89b874032fed141857f0a80342442d20af18a084877df28b3abd4c9d7218551bef523c17b4729d0689b0833e190e3e60995ca3fe5075629ea4ffde3e65f20777086d5cbcfe742cc22ef46d06e9ba35e4017eb35fec30cb7ddc37fa22daa9e77e202d864f6d34541d854f00f9e8c1445ac432bff67a5a00b6cd0da5eb796c7a44e92b5c67f55de92ebcef8f690d7b3892362d884f2d8c657db5dc308c95a43cc42bfc5449b45c05e9e60ca5d88d0c07b9cbe6b76da91f7c572e1c02ef71a18833e6779df711a4104e21d5939a982e19e22292df280adc3f0b10339f53fdbc44356a95c27eb23932302678b86094d5f4d60e028af61c01d7fcd83ab9f78c4499c3e7bd29507c397ca43d397b90cb267a6ec15f37b50cf4f2d82d4a4fe8f56355c27c20cfd93ed5f84f321244c7a7dc404619b3f9bb83affbf4d1d702b336ac3e504ccb86c18a979354faf0bf4e725fe1ef051dca8ce0209b7905f8f19c5ec51fbede48f57cbb90d14d666ca09fb4d0b92c6e2a54e8ad1b51cc20cbe17c86901f76d509bcbf0d6ecbf08685da20ec75c11d8c509cf2ab9842e2be34aa24920d4a035e1641cf3d5b1669d46ac9531514d3082032206092a864886f70d010701a08203130482030f3082030b30820307060b2a864886f70d010c0a0102a08202a6308202a2301c060a2a864886f70d010c0103300e040806de2bcadc588fa502020800048202800d03f420c35a4b8e1b3b0592306996feb16d41001d0aace08d4dadc51fb2498f504c4bf57a54eec39102d76665eed9c46006c9a181bca37c64e96f11b0c7c24bea8bdcdab174ec1aa2f85b6a0ae4ba082516e977a212ee8ecb5d79b7431f951749046ffad4fbb2106016cb024da53894b7f2c7e0b8d2af6a4823d57d30b884fba32bebb88c0bf53f370663f37a4276750ee22c2a76fb428f888dbc1bba10bc0976c7a5e73181dd84
aaccfe98e2fee04212f1dea2284bbd0fb990646fb276610198eaf210d44c63d245234fd6c7486d2b899395d75ca569f4cc7f1c1b9583d2e5a3310ffd7826fcf206cca0fd2557b9317ef638e5d553ffff917e41c6a3f184ca72a1581725a954f5ed157dc9b04b1f2f044bc267f9de7e4d80aef84b91a94b66dacf86ab78928c873b2b8963ef1b2fac24a603011edb223aa8aa22bf3784e6938edf7811516ae4862a77693b1c254a4ed30dc85bf4b5a79942f841dc09db799eaa89051fc51eb917d9faa9781af961ec34e2df5ba531628d777437b282a2548d9f64eb72069f0325cbc65123c67606c0812920862480457d0df6ea547a9f778d48b24b6ca72d47bfd4cc6431e126a43c8d14ecae263da06bcb73413091d154c0e67fb6f629131c2d4a0d1b750941b0ab8a188ddb4cd427396d83f922bee0f3a85383d5bcb8ec89338b933d181aba79d7f2566e74b9a01ecd755ca4ab38963fcf36c985f5513ea678a822cf8acab673234bcc3d7b210da1b762814a0cf658e5d8ec9305b887d444131278f790fb8c77f3737c5f8f864ac7554bbf4ee8c3d78523462628faac312e2d37062c72d05ba2fed1a51c9017a75160cd267897802463e638a8e02c2a2230f518365470aca7e8c418bfe99227ad13f0bf2bf6d4124724af314e302306092a864886f70d0109153116041467a3f379b2dd87441f6abf68c9a9f8429a92c044302706092a864886f70d010914311a1e1800740065007300740069006e006700310032003300340035$585f5cfb43702b6d02b55418ce3925d04cdbcc63", "testing12345"}, 
{"$pfxng$256$32$2048$8$eda9c105494d9435$308205eb308202c706092a864886f70d010706a08202b8308202b4020100308202ad06092a864886f70d010701301c060a2a864886f70d010c0103300e04081e887b937597d33f02020800808202805b10b55d2c64713d8085b1b3633dcf1c87b8a20fa9a3c2728dd7ba8dcbe084539bca898b4c884e3a7ff15458b155c50e7a5ac6639c300b8234e4424819e167283160286d53f8ff9c78e05eeb250e16a7445e24ba87f0e3a111ee3803d56c038372fdf96311849b7a35513ab497be68edf0e306ae91b3a8790a0a6043e4051b4b2b90d9c2ca1b5d51fda2bbc6aa136576e7db2362af99c7be31947ac1ba4dade30c8a4c5dc9ae517528ae7d7e34e25ce2f7f3a42c9f0bf6ccf71d84a1117b6acfc95c3493f1937d42c019c7385b86028cd1795e5d991e208aa54d56499c3fe5b29ffe97f8194fed46062d89127553de7f73717435e6c27db8c9cb6a8f06083e9d17c00f5afc54c2d6c71d7f4c9b8747bb53539c40a5d3cc50c1c21eb30a28dad407cc31438337153db91c6b9dcabfbba121d976e9deb182b530e94b63799901f02642973e3ee41905f9cbbbcdecdc18bb4a7e5cd540031cdaca594602ef172c91ac4cee0c2e86cb34b177180e05171e22557201ad9376f5fe99119128dc32e1ceb728e8ef1a372c5bf2649ba379bb9ba300d6e9e3c7a37f5c02280489083b729c6beaa85aae2cab3b05c8543626118aa3ac42f09fac3b26898ac9762ed88ab4099aac653c2d54fa967179893408cd5b61e2ceda8e6f7789752d78357f1f9762afb621d2444b2e8ae5a09596d37ed8309276ffded09434616e88fe075d2c3d983f46b0e1b4f330fe2939bb9985ab8becefabb3929096c18ca8ab76d65eb7642454264c8617f11d3211a24b68a5cd146e88581567681f5db43d46dee2d3c3f0ae1dd6f194691331fd6166bc4ee9d076e571a30df4015037c7c831e5b837896d5132fdeaa4cbff9fa372f873cfd3e5a9037cc3b95d2b0d5fd2a07691b1fc27f604653082031c06092a864886f70d010701a082030d048203093082030530820301060b2a864886f70d010c0a0102a08202a6308202a2301c060a2a864886f70d010c0103300e0408751fbd442d0cf3ff0202080004820280fc073ddf174703f1be2d4e2927ef21318267a66f2d1c650ed82123a210f14ad8b8cc4f5dbdae189659fec6748f8b12977dbf586f313282ef3884a2eae3fe6df56a4b335c647764ede9160063baf0614d153ce5356fcb0c41ac97eaa30d49c5156064f56a481dddab298feaa96db78263251295013ac8c63a0958c60fff45301571598659ad292f3c85691696350a3d4c3661cfed45ae243c51e333872de4535ca30819b9b71cb70adb05853757ef3c
26613ab622fa828e69ba99859b5272c853afaae61ca7787bb5a629ac7f4fb7d75a854545a559dc06743c7722e848dafe57bde7d2179453f618cbe194cef4a1a173656c765277ea0406ea030efc857668a11a62ae61f3972b46f324b46cb29f8b5bd7b54ee1dfbf3c87ebf1732af3c6304a3b6a3fbb8270ef8c3a8558c942fe018f94e0127cbd1e36fe346f46fdc68a9634ee3afdbd811254377b3e8f71c43c4f5e69bc3655ce8c8710da0ef576c2f6c00b649d32976eb5562b2c7df0c646a36f11b86946accd9bd018e81d5dc7369bdfc1ce8da7fdf39a5fa9732963c0cf81da9d0eeb476e824706151513d3f7b5b606085b900fec3a1e0fe673ed413ac6713c81db5db7080d8c3dbb426299841feb3fd58195218d01f598e2d4ac41376adc768c3f68110f1453bb30b293d98e980ecb325fbb3f87dce6e897f407a2b32c49e2329338ffe327d1c3fb28427af1c9dee4f7e5714ce6355357f8c9634c24922a70b0b7c7fa8ddc9bd55de97c768def9b04e694aa26a5f4f4f121fa24ee62baec2a1a70db99efda22c6975e11ecd9138747e6366ebaafa9abd02c61bb2b0189de5c315095bf371a397c654f0b0a8e4fec15e01bbaccd3fef22d6359270098af58e6c2f203c992fc093b08f932220d769700593148302106092a864886f70d01091431141e12007400650073007400310032003300340035302306092a864886f70d01091531160414ca1ff7ddc2ec65acd899d4ccabf13d4b72def78a$c47c2a58c174a8a3540d6103de3403d93b9c66b6769e74022b52981ff4ae529a", "test12345"}, 
{"$pfxng$512$64$2048$8$d76feff5e054a36e$308205f9308202cf06092a864886f70d010706a08202c0308202bc020100308202b506092a864886f70d010701301c060a2a864886f70d010c0103300e04083babd1a7ed8725b802020800808202885a0ce329788b3c935db5ac040239817531e749b8e2e7eafe53c45e464cfbe27c6b2235fe934e8be0464dd0c8861b31a8781c74704f2c5c109771b7a3ac14c978d94a53115546df674e202cc4b051d2e094ee59afe3c81b20c0c9ca5c6fa0b51024919844b7c94aa61f4c7ed4bd238447f82657b9bdbb3925552c6692532fbc7ff775b71796118f7c5d6c243ae5b0f7d4587d9c079498be2b41105dca2a4375e5651dc1d3dd5a003a62a9669f3931221e71e2a1e9387dd35d866b3a81513be6134a4e192593447b6965d40e276853bed834e04d6aa68139e29a8c4981665cef6a1b3690f81add179a84f5bd5620093a36270dc3c4594d4e167b21b183ae5bac73484e7f2204288ec80521a0044730120b04334af45f391de13f8b29b9a2be2c9d7d252229d0e0ba3186f34b645b392efafe60c9a3a34b7f30e5fbf7078bcec794418ea2ec184699bae59f55734c47738b448b99d7f69815792642786e775491603ed921e45386892e182e5978fac39c37ca4953553a17dd549a065e7596720ebe6e01a87cea0b53210c540483a95bd08a8bd713667e27e37e54ad5bcbea66642e252c1f400ea8a1cb08907e80420b3778a56b55c154e420ad480dfb94fcbcb79a4f3b06f1cb41840de6d6a2f60513b69a5477bcc83f8b771fd40390b622ca1bb21d07df796360e72b2623f8628e45299ff45331a9ba80583665610ad82bd658e7e316f56f810751d95fbb574ed6d97dc65670fd076cd2092c63ceed5cb800c131a0971a74fef025cd3c3aa7a5aadd7724c2f84c879a26b40b7e5c6f0513c01cfb4a5bfea4132d7b25554d1a60e2cf73c96c9f10dce8014652f141e421dfe5e5386cbb0a6d22002bd86ac42ca542893973b497a431ba48d9bf5cbfe39ad0b980a4538e5cb15cf166643082032206092a864886f70d010701a08203130482030f3082030b30820307060b2a864886f70d010c0a0102a08202a6308202a2301c060a2a864886f70d010c0103300e0408c2fe6c7c551da0b802020800048202807903390797cba159723105cb53d25d5b1867c496d6005cf5370338c68d6078e105dff9f479f6402a7d1fb1c6c7ee7c5954990a6d83f5fee29cdf6f4bcb26b0ec09bb764bf3ddf2545e6569ade3fddb0f771cd832035c9ac3b4154af8f320e726907259c7a43fe1170e9244d2c314c055083da11860e1fc692fcd0144e6a38d3933e2801151e2cd4cb58211f4622dd8248c895823480ee77457e3ac86b23532e4e18a8f144a7906
e80dacd3c2f4faea0df51e6e4f8175e45254e03996aa9838ad1b0e1537509416d75ab767e1f1d899afaf517219cce58bcce261da621863eef0a9ffb3e338760f76ffe46e48a6a4e911e85a1097023c6271f2c75b319c66c769ff16a75a0cc4bb542c74ad464eefd60d09ef83e390cefa532bf1104ecb5867a7e7e84feae114e0558425d3cd570f15447f461459ffdf1b1cce6efbb18abb734752066146f5aee630dca8ee8b77f6dbc9deb93282d7e6d559f3237bea628c440cb62bb950cf016b83c4160e386e5637cf35a15b695fc82ef4d86e5d559eff5572cb6c3f7cce02a256f96127333276199be9d187c8ebacbc2a284a365835c326aa38bdec0d0db2ca0cc8f576be802d4cc965d92a3fbf30179bbdfa913e6045ab3fdca5673cf899be130556d10cf6f6d0add3a5b622485e33cd765b95d94ee8f9a5146e6a9ed673d78c0c3ea2dbfca673fd3fbe9603ccc3f75788ab3ae6bbaa5fe547b2995d3593b30cd0efb0936e0cd5e525b539238edbaf7c0c0594f7f012581328fcd655d0710af1b83e4997535f9e99e9daf15fa51e521106de1a7757bf29c5af920fadd4540bedc06d6c123b5d9a99403ca7b4f4b7d422978055faa6aa7d046ebbe7653b8636ef13bb7a4c5909e52a238897a84af13d1393cf5a44b8b4b6c2314e302306092a864886f70d01091531160414ca1ff7ddc2ec65acd899d4ccabf13d4b72def78a302706092a864886f70d010914311a1e18007400650073007400310032003300340035003600370038$41fa1aff8851060cb5db172fc5b2143ae9a524148582d381599aabd96582ea1be2a6054dcf4e1a5c27566e7305a7b9a2a94ea83153f32c7c78efd57649812303", "test12345678"}, 
{"$pfxng$224$28$2048$8$8ebbd2cf95ff5f8d$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e040834ecec80694e37f602020800808203d8ec3f3a3fc8197e554166da292ca155e004828b46083a2f0a242f892f587a78db4165097c79ad56534887fffefdd1e35eeb7f129e6974b6643fa9fcd6d6e3228f4b37e1da371b23300d5cfb868114c1e1c61118188c32e3679ee23d9bcf0adce8699e3d7fdc36707a66efe61ecfada1117b4bf3f711a6e9eab20f90882d1c4f0928fc753a44f822c6c47f3c7896dc26fba03341d8b902fb00b669c661fd4982888f264d50ceec719aa14acd8393720a9127f3f44a93354e5c8477c6cca92e7755dd01d3d6bbca09bb549e3375c6467202bd2012ead920ba47df4851156f629fbbd2530e89738854e2ee2dcc49485bc930ac43156ca5f4e09a576bccef73095f8eefe811a71ae95d745c1d7592f0f1e22da89852df0dbcd30d42c3622df9f6149dcb76523a339072c07b10a1922e66d2d98e3991c2053b2f8377be7bda18dfc9205cbf37b37c370827941a038a34fd202a7e779a482cafe76de3e09e8d104240e05c3cc87a71f5527641b3d889ba9b685df949c0fb764713fdcb4d925dd86939dab3237cada323feb6cac95e4b7e68aec25cba82e3b458fbbeefd0a48830137bf962a451fc451e980e6d965bb5c2be4d6e38460340d6808149fe0fc114d83baab0f52cf4784c0dc98d9f93ce57a5f99a5119b9eafda12f520d130d9400e5fb317edb0b339b419973cf3e788a05bad9fe7d4b62a3707af4dd6b825c41a814edac407b0b48d723bba13d4768da5cbcd4d460bdb66e5596d518a73c6d58df139e93b12e5d00251f531d0bebb754acc343d1ccd6e2baeb650fdc70a6be0f3f198047020eaa1e8b314137af4a2f62bf1a066ce28baddc35bf6edc99a4af9c0db847cbeb0ce6bb86d9af7d5efe25f67829fb865192b2a672e8e7d805b085cf8318480f0308fde8d3309d38d180393fa4d4ec9d9c65ce22505c10bc05d1e2438dddb53aa612b2654e8da0f6370cc72ef50ef7d6b894300866c71481e15c68e351132f5ea53d1925304880fca685ee87c1f022d67d9c77e47aca9c81132742171e305dbec6827143bd1bdc2ef48d43cbd083150a8735ad7d4b954b891036be8b86bdfcd4cbce537924caf91241bcf3a95e0d216b6fe91f5530bbaff55816041fd03d0c691a6ba4984dbd5b5cac64bdd9647b84fc9f413bbc04da159818651d1e22d8cd217ca0f48a18d99f87f0b41816afd28db0c47b2293015d80ea1ea01a48b3936ebec85f8f2eda8961410c5a9a5e7748dba8d309aacb9cfb4a617bf68ee2a2dff5257ca13ac031bd
02c0cec99207fc2e603f80c0675ada7694328d4ccb0d464dd83d76a2268fe62ccc95aafc097f146cc67ff210b9f8f14a7886d81be7473b72518455827ba88ff6436ffd2ce389650248cdea178c1fa9014cd3c33082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e0408e0c25fbab714e22f02020800048204c88e61de00e92f62481711cf50e9845f057d71113358c0ea539f5dd8a6b84138a30454022476ceb5be480732187553fb94898240880b2918c09794fcb8261eddadbb6235fdb4cece8d57d684af8c3b694ad709582c7db46342f3945b697e7779299986e5786a0e081cdaec348ff8425332fa9f16a483ca04b33974815ed270ab79ac3dab3d814512333996ea62c50f15f6f75e2b26a717cebb0999dc76b19b22171d61c5c9f132275bb3f606df0766e57f036ff5eb838730d4595ff7920b2028148ad77eaab5d4c514e5c874e4b8efd662f6ecc17b3e28978194aa71721f41580cfe255b83f04a085b9ece1186ff079038590ae6c672aca977c3f849f43c88101ac62124694d3a9de6326f21694a0fabd64f98e44a3b7b11da1b6aabd403dea603b3de32007d974fe300c15bc7d39d937314958d6cc99890c2e9726fa0050574969e31154301155d83ce929a8f781f60759dc1e9dc7cc108caa537f1673bb5a6c2f39c807e73fa19af0cd030933f943b613f59cd18c907c1e58489f37d4b92dc1e20499614085802f4d408c201a5bd19eec7e8b4c6d640b7b295e656fa3d80ad35b07b4ff828296a4ff87cff5e2300913774ce3f08385ff27841ad6ba0de0899fe7477d05911e47ec9ffa10e7f7a5aea5cd9890f8e790cdb6c7c68174ea878be69baadd95fa9b6c9b516bc1947d38568711c8b879e978028fc02c4a3468823e1aea7964994e416ca6dffa551005c7899f8afdea00d1b18924153eb26ac2d92c0798a97978dae51592aab90537d4d0ed7d72dc82d074d0d765be9b456903d1eacae97c974d59ef9d9b4c90ec2994228e66a4fc8f393a8cb0477811bdc4179e73be8f613c0d4b8e9ee636c623ac82bb348290732482225376b6285f630a74c75dd89355e27af97887fc67e84855560515f914aca493f1032ac147b511c7572c8d1e5452350823b0778431c7d83e173fe6c96b8c8a2bacf7ed37194f14d9e69d8ef9e0c1669fd8d781931ea8966c1eff82baa9b856bf4f48eec8d0e153d6f553d909a730ef7d72bf40af843a7cdaf21e034c8b2ce1563fbc8e59af88ce17c1391fe9dd387cfd558438581b336a78c50e9e28721d9cb7e9822023845648499976bab4486a2c17543977d3527574d8392764041c7167ab38ab994e87831b29d79ff6e
4e07e188d187a2b22f3ce002457ec8142e988c69b83fa99a273e4e4f1a976833c2568d00904e9c10bd57da6106cf61fbe98a8bd7115b030cbd2cf44e447271fb9021a8819a7620af7f1e0b35113cd0ffb6e3061b73b82c4c4a5447f3282c571b9bfe2b9d802496c7a0cd2929127634d11c5e15b847c022bfcc693b941a5dcd7f20cad5aff5daf16b68d9f8b70a2e5e37de32cd3dc21352edc11d4056192e0b47fc9f199afa3e4114e77f1352e43ece8227681c4a95a4f43ba7afe7d42fff550c8daca73cfd67608c42772fabd4c418da44c1b282ce29f097e74c9f406f8812b6278e6f782e31c7d6f92d56ddf957b900632e1d76d82e0a426d0400364034b40f13e76bbbffb05f97f995e053b5beaaf94c2751afcbcadff2c9f744ab1574a8d627224adc6205043068773afac77d9386f28be34cf5e4f5b4f573bd2eabae317d29912503b6ccb88037969f508a26ed30a179d1c93f328b575fa54ad408901344ff18521e1c4791ad5ac7a6c3269464daa8108c8ba59b18fc52d81e5315f4b5f00fc822c86f625382963125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$2c157943e0cf1b4e3c32792d1a9c79f3357cffb522db8e40542c727e", "password"}, {"$pfxng$384$48$2048$8$6d9ec1eed08eef7f$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e040810d1d87bd25888dd02020800808203d8298c7b7782ea470f554960a13a31719becc0642584f1c782652d36d916542f248d68ee6fed7f4beb47b534303467805745e7a4bbca15b576ea23785e716256ba3fbde3c260d4ff31d847cd2c7c6961727f2ed0eb586f3189897a904d7db1e6887f01139886b88d11020cadfe7eb6a947f03c142a15496a9f2bc4c25bca36b70f5d3692e8426bd435a57ef53beec4e6435dcd4ea10c26513c75e0fc7a0f746eea2c796250456f1ea2dbde549d9402b03e15a10cb1bafc8bd6c6ba36b4f9814c037724e963fdb75fd8ae3d9617f87eeede851d7d3e499ddeb11a8d8d6218727e3fd4c78199e0e16a95f7089080cc35829cd2cba89fb01a6d972b4c26a6c4d63f22bd737446651ce6807aa4240440ead384cd821b95da4ab216d80108a96ef9901d695cbd3b357756f7c537ad7fcc3d0fcbfc2ac76cf4b2737d581bbaa61eb1664d1052fc49be8ef85245940f95e756783d38003719424724b34f5d70a5d715bf644bc751170be5b1bfe8d3babed56793d6cbc92cdd6d2075080bf5adf6f5a93933e8febb44a06536d3eb2b70c7e7da0832e01569cc561ddb5de770571def7c612aae2ac6a1d6ce65e2be2db6e2839ffe328
c32f66a004f10deb6ef6700e35c801687ad1a945b218caab08daad7cba68c6fdee3fd9256d4726da83b8287ad30382f8349404dee0dec6cefe8acda3a031a8956ad8e287483731abd877d1f8ce694a3d70a70c095dd974cefaa5a03f65f9df96f9e9aeb49514c82ca2e05d98685391532548c7c3050b2b148d06e6d81265fc3968478636a4912ee7d9d59d03c26c02c2c5583a016412edf511bf6c5258da7ea2d4d11e6405a5685a69aa65c60e9b601e45d38195969886059dd4332b3793dea92087316edcccfb114e02d86cf41261a95f80d11f846ee50598b33ccda243a24b87edf39051e43ba252654117280af5913faea2eae58134180e1b6cf0ece48a705cb4a4cb72c4269c31bc61c73d9251d27eff1690e8db033c6980b46c46a5afd363ff8d03b7b54e7558c0f72e3f004f3069eba6da17eb75490e3b380ed4efa3d30597f2ef235091672fc392fca60b6e957523ec3fcffffdef91420be03f53c95ffa59949ae68bd8c5a16443ff2b4013fcc99cc0629357094ceb40543f954e8c1be0714e08ad006e771d03ddfd74c48848656949434980d4f3478dbd8e6e546de0cb0f7f0bbc1f86fe8579ee32bca3002b0ad854c82d16168d3666e70d38f69e30f40cfca65928a8bfd3ec4d4eeec16e8b308f6cd2bc62e9b9204c6af55e8c746aaf105760d2eef67b6a70a04c4e2a26f9fccb9b99f2aab321c6918eb299d96293b392252b9628d5a8f97b4094f66cceca907b30b88b14b3102454a6c6c10f375dbd07eec1175c99b3082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e0408727935156b8cb4d602020800048204c847efabdcd5bc09bbfd533f7d4b4e89fdc794cb45bbb4dc3db102fded9a0001e60a6ad4b714a3f465f8e04f86af05fc059691c8d1e48b5cd9cd9b7473e9b441c21ffd7a06c67b2436cf521c011cdd0d8b7f4d00758e30323f5714d9a5398b2a2f68c198d3368aaf08b81897ecf507aa3e2db10de58fbc7364391e10b6cd141e4526827d1e0f833793dd09b4de2da52e8d42edc4c3da263eae310f8c9e3a451a6f6ead0825fc6153d12ab799929a1ed37de14f836176aa0ddb35d955d2951939af99a3bfae8f91aafb242417b6de6a0898935fd3a27fec1ebcd0180eefab2183a891432a8946574273bc04389df75b98962843d8d4daac5053d2755bdfd77f5832dd0aa1f9be0b8846870df70a7c179c316f811e450634c99db8e0c57b4629dbdb23f2051dbaf5c9c48a11d512b836f0ba557f1ed580d848794bf5c2039afed40038f76dba684ee0b55dfbd0378dae28a5ecd722a029f3bc0f21b2319541bb90da406b8b946fca94784cd6a
1baba56b2abf5dbb512073933b2b27849663fdcd82f6d912dfbca9021d8cf87491cd08092035b2ee5c44b6aeaaaa798201a9012a558d2cbfb63fb40792bb007e6de2a571b204013665d35896b066597d8c22f11545fe5cd814e53d66328fd083df315871c86fd2ce9ac92fc8c694b207c4318f6135b3ded8e0b25c16214634140afe47fbee2292d1f314a1da987f98e61e892731ed9919774d96dc0d2b0a2db4ab4f2ad6f3c94efb9b206cf5960df283085938fdc953a2372fbe8d5dbcfbad2c552aa0f93fa4a0ebdf76a6237c34dfbe07df0739f1c4c3a2dfaa8093cb9077f42d70c8133824ee3e3945f24898b4c2ad31990acae6a9353279f39ccd9de91d8283fbd2c628e8bd079cb6183225ac72594bf2f33edeacfabf15d210fe321b4dd3cb1adfbeba31dacf4d8c7f10ea240a491ded051e7a992a8bc5f10552787e54ae366c7fb87ea7389f224477837c4e4c93e3c5797ec67aa7d9c999ef3d155ff55846600809986420c738c3af8e221b777b7ff8fb36a5fee9c3dc99c51ea57ce702cddf7cb5ff3b3b8d942a37c2058f3b99333ef312a6636f66cc77e54a80ea9bf05c9df70aef68e93dbe5b34e5d009845a7483c95eb624b0100bcbf04c6a60098f82ab643be533c7e012456cbe7f9b1aa3ab45db4aaff53c80226e53ad5ec4f32ccb08edd858c128c5efc43d53a075ffe10295bdbf39ee91fb95f94374acf603225835e24ae9ba95c6b8a911984cedb1c070d2ad5142e3ad9768f5147afe7f5a5b85f763fed9a093e2c24f920e6307a9e5418293b4777177d813d70dbf2feadb7a49b382dba1371c9887fa895b7fcc2b057aae2d6e40507ed33e368ad0bddc6503781b22023b4c41a9ae8c805a9ef83a33cf34c05d6a84472b2e7efc5c2dab12b59a474d1bdc1c015c0ba61f777946d6e1fda7fc4b2cfbff26fd6f04d9f438483adb1216d86863108e3c1ba17b9354c914d7eaea2a32fe4616d911e09dce3015ba3cceb87d5da150f720075d50897b213883942b1fb3fb206801890e695d1dc33c04f340de718db8e28dc4afcf9ac69456191383b3298db189847afb5a647d5bd5c8b639954b1d261569bede67af7f823172ab7728f714ef4e63f04e4c3596f86c365862cc11d95848725b661a5c943eca98399fe9773c7fb4371fdfe44a202acf252d8ab93a320e11607e89720954ac7bce878adf374bb2e8b975d1a23713125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$f9dddb4d707010511ec6e2ef656fe5adf3c24973acd0f7b6d71f1d1aaeb336eb62c6f26798e52fe4a1a743abd035c795", "password"}, 
{"$pfxng$1$20$2048$8$7aee6a7d66fe6625$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e04084bf94df7d0744c6a02020800808203d8b9624b3516cbbd4dc4de04f1e73ddd6154bdb1e4f9f598461fb55c8959edb1afe9d9f354a0ac7d9394e2eb6efb0e438032bf623fc719ea951e9a40dd66220c5ec41f97a71d5e521e38f75c6c1705dc21e6f6b1e055e2d4ea6baee34a5acf6ab92407237b0a956a23e74e0774235804df0753d50d482e354eedec2d75b6a640282e9b1ea5cbe6005ad0633a0694428bffd9cba0711197cbbffe7ac8006b749738d272f3c22cb61d8d9c635d8f3adf6baa207fd1c1cd864b9f3e226c100c557e55a26d6a836435d8f4cff21133b1102f06e0d447c9b866e1f2f12c40910440a3c523356389b44f41a1e91652105d966e0194c99dec35965f4ac437c66b1ef3913c59036eaf431422199f416a676cbfef80ccd4bf714e5056a71d14d054343c7afff7ecea9c926eed52302516ace0a3fce361084599c86cc7afcd9a0ce31e2ff4fb9b4e9e6db78cdf8c79992014b9c14c234dad608a151a904dd6190a28e8f3fcf3798d50d7139848bf1a20cec45c228aca393f83aebaa26c8c0a606b3922ce6f299b9cecb774c5b96988f4e719e102e6159bf7b2819dcecb47334b6dccfa08d9941e57e4c4985f861c7f7621a9c9eb697641ff7c180f58c489eca1503409026d00a8db9db99149494fcb57d61e107ecb091789541948ef586f57461150ff85b574dc8dd0afe5dc5221aa021dd00c5c42ed4c9769c367f6e6a2a4b3dd55a19514ecf41a24b5ca686f93627c3ab6e931495fb6b1d4d55f9b9e187a3c1ad946102e72ea625d938ac362421f55bbe5cb87426e04afbb45a0ae54901e91c0f2dbc5176bc3799e32638fe1cdc127c85bc2928dcb0c4fe4e1af980235fda918d641a0d357d60584be5a8556009099750e76fb12c0607e3389fa48d72e10b5872cb055452c4f7d60d9782ad5c5cdfcf15157e4511d1cac0dcafdc522996c9103aa074d65fd457c7990c1801381882437c4ba7783622c4f594d97b9138318e04bab564a3e06bcf2d4c12137accc3965e14723e4c255c8fdc7e4784a27eed15d5351486654cadc65a7a9321eb32890be6932c4a0b0ead304e9d9111a909dea06df1dae46d1d60db20cc1026ca75784afb2c1a79a85bc9fe4d18990bbe19147063580b59cbce4e9167c3cab4622d0ccb7463382c734e5aa5a2276250d266db1bd03f85edecb30949e59ea258885c39cd145dad486f4e953ea6f527f8eb11858d7c61f79c62a9113be7c3c3a5f6b9531c76669ed06ec19d7dbe2419856801c11b12386a9fc6b60ce16b2cb13b8
bf9273d25ca37f221aaa538d4cd618f0d2c7bb56cce3bb64e3aef0bb6e16003be8f4c6abed5c46a0e89e4fa7628480ceadcc8d771b44d84f9fb4b32b10ee19698707fffebd2c04ea699209e0d7980992fcf63082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e04087f3670253d7d7ccd02020800048204c80c7bda1e28e835f2d86c751e4543e563855e6583ec291b7f5ec1057cb20fc967384a405f8736db95c10692fb946b617cf08f52221a4c685a77225f3e5b0936303069b898b9c2e0beeec48ac4891107de2d38f9718eecfe9d5853c067c2b5386eb971db1cae925cc0b2bf01acfda8d7105ad8589f86e6829e37b33a59564f582674981992e8a035f62f9d48e963a553ca7ba9c0a1b748dd3a091607476a67e45d29dba9716ea6bcafd4bd5d040b45aed85e7b74b8be43c9b53f30b8d14dd705a302021c6c9831591e50ef01c3733a750caaee661e0bc9aecf9af5da661c0aba99aab5e349a7e0af248048b698d704c04271a0c67d97e482006bedaa7d4b42d5cf486ea34861acd99256bbc638683839d55188391cb720af0ca5065c6ffc10b7d2f9714f7bf391310deefba706a7cc639365c89b927bcc156ae7259665a87000387ff928d7813f43c1af9712ce4c05f646a9f20eae93e172bce2b359ffcff56ab598eda72ca0b9d755f4d5c464768ecc9b450900e0be7b10a5a1efc0bc25ed1450b530c4c3a441f775d1aee25256b8470a8a5748c49a42f97decb083ddb0b595d49d56f61c5fb1da5b0167e7832876e2e374e03553d93f835b23ec3924d22ef15afede70f7b9f1bc0385827814cbaf11a9835eeeb6802769e1fc4bd17b5343015a147d69355bd15f87027fde6629e0bb6c1d81b177a0e753445bb6ae75a9dfd4777ccadf8b9a8c452e3d48e021122b5cc55de6c247d82b3dbbf01c788ecc3524f070969be8c0f9f2600ae51be4939c1604a34332ffca45c722c6d4b67cf160588572ab0637b4a2af48d53fb779267e8f45935992e6d3892ce8d5d07b7d5a7e116c262bede3d04eb99ae3f991f3cccf295fcabc01cdf1d1d3a0d5d7d338433198112b94d620f930872db2cda70b5537d339c988120c0d4a182c1d7547350519c362a5a7f63031ea79098863485912333e2d8e1d6718f9fe71a3080389377e1706d0ee0358ca9f0700d0108a9f345f9ee315f97fe0bffc79668321cc0f955cab9ac060083b46882741e0fc258060fb33de74110ba8a7f180ef65aac011da9d68b9a479b6264462ddeb895d1595378df5b237c07dde2147eb645b12436dcbfe4625c55ff1cb53fd1799b4bfe400ea3fe299ad8b07c92937f388c1432fa164e196524d
8dc6e1c51838ae1817cfba18fccd59abe6b6ca36dc1af115033b00035d46b0722da1b13818c525a9d210022e925f73c59d785e48987088e789fe53e22520f3ef3583776f21cd00562527a8e6e6553b8f1bc07ca8fc3bbe721b3261e7448ee5e3e9b5fbf0999609708d8acbf2ddb3e8fc939b523f472af686f096bf43cdf0278db2ddfe5a69bbcf8a179a769978d0e682906e5a8544a6781a354da66355f8cb3ab9d1a5f1ecc19d62ab70e02835b2e103ad8190e5371f147ab5c214d371c2995a5fef9ac841e9e59c29fdfd72648203933cbb4be7794bbade5f22e4d6f4a6cf975545e6448d29be17fad043844a2467b936b0c86fa3ae532d8d050288a10a0597bd7174d3d96127b200f46882cecb64e12c590a37cf1a956680717fd65fbbcf7ca22620d4b7198fdd05e6d089c1ce61e3d85ede4350457e34b541402f6eba9ffba17850296244b5d3661fce38998c244de4b94acdf57ce2339ea15218c4983c493fa5b0a7811c4a3e4f1523d929ec35aa1b0045fcf62788ef1848faa3f08015da2dc1f572e9c002183125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$683061aecdc54815e1d34d14f5a55d97cc11418c", "123456789012345678901234567890123456789012345678"}, {"$pfxng$224$28$2048$8$3f7db8efcc171c50$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e040887fe7e5bff5710e402020800808203d88a0d5dd158f1a9a5661bb373a396a380e74285cebc4bb40152ccb22a39ddeac8f977b0383569b292759a766f723b7695494532875ed288747050b8a9a096da8d45f11505e1c9762014ca4ea88d16244f5e02e83330dc377dcf6d9af0b0a1d3789cbe1b1ef042eb38851e055fb79fe57dec96a366274297998fae9276720f62807bd37fe0d49b2c8776c232f6886a4c6508993cd4c267b8534b14e454292dc157ff95d27578a0a6f268fcb88f793039aa8d59a1956e6d4a46b9606cb31f2437b627c6ac925754d65cdcfb6bc9271daa3baecb7af1f32ccbadb0f639c45e767428329c9c60d370f81e3bb06237686f5844aab25cb5f2a451f5d13ea7a767a9b9bd89d9b1052b06ccecbcac8ff68c764a59bfb939bfcd2e21299019070437a2ebf1f69646a2728faeb4f54890219a5945f0079b2af5e93827cf1a8c5ed41cf108ce8fe6375a812c3ec35f3184aee38bd5092b6bd5a2c3094f9019b64295ab46e54d02262aace92287228b0794bcd340069e63107c3592ad411ad4e5dbeb5fea25a2838fe04402992e7141a565b0660211ff5ab69f2e3268ff4ad4c8814f34cc21e572d7a6cb13f
86c593f865a7c82cdeb159dc3d84bc5413c235dfe6dd0016f1eb59a9730ed4c4417b2aa3394a26b8fd7852e70d0afb4cfb674a1b050e96dc51df1db0b7f885173e94db928b70724d89586af381c6b6bd30c43023c7700e490515cd799ed7bdd26cfc3c00cfa4162cc0761221848029c336c05208cedec21c194f9641aaf03b34acdf240eb4fd8f125c8803b60c0d82b91553a565efe46013da91bb4437631322ef32dfb07e9021d2fd58a01fb10acf0ebc15866adab5c22174edc58ea7eddcdbcb0b0b5e39cc2f6ebf0cb7c1018ad4704f4b170c47f9db713b8c444826ad686daed3bb4019ff906cebd65cf96fc4840e43c856f9ca45d0cb0528ffaf4f220392c03e0a6c34f4b8ca959d838c3bf059cc877f37c257769bd89d93624112c1e860c5a5ea20f10fb218adacee12d94a8cb3694620fe1eba547971ab0e22f8765b5dedca8ada83c219e5ea3f0d32abb45dc579fd73ded56507eef8edc71a67483daba2427e7761c3307e2ed0c809ccfe3c8619c9ec9e505db58663d1e456d4dda80522e5a4d6f0133430b6fd56856241091ca00920b928cd96c64bb14214a4cc007320f445909e01fd0f1340c64be9118f60fbcafe19a18476806ecae7692e3b527e8fabdd24200727cd2200d6d67606023b9f453ef49f802e9cb4e6820805a60b1ed9ec0df1547e707e22f8466f75cfced0a8a522eee5c6b4f48366fdd7d0ea61addd5e62d23e1aa3b5aa06989064f1ac1a64ebf670a8aa714f76ff86313b37927337dd2ac76d14df7dbc79d2407669a80a4243e3082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e0408d00ed855d37c4e9d02020800048204c8288d70572c9b06aa500540f5ab37039a280bea3d111236da9115531cb602b8a25ef3c372e5861f5f65e5a5532263d676e5799bd20563cbac0372be86ac49599cc4ba9cc8c7549e1ea3a9a48603644b3bfe73f59c9f801dd3b19a45002685b6414ae7e5fc4df9b39c1a22f539e3b7a7b1298c92cf7e151ffae9f30c818fe2812016deb51a635132c2451749d15a791003585e27a7363246d842c202b5609e509e6bd722c61007f1e0a3f8bd844a16ab1e548f324026a2f3147e9adbccccce45a06fdcb71c6aab442dd6de4bf63cda0134cd485f542baa20db55838271952abf9f06c4742d1751b1fb79633b120e0481276dea9abb408fa8954514fea4ffbc5710a2bff96142bc0b5728b55c38425715e807b92c5c65c1e430d59b93b718436b773a3da1e86075d1869112cf538c33e4751608886d1e895fb0a217c812d0bcbb0c14e30df68e4bea08e49b292ccb59c2de631b9a2c1a8b2969d59513abab17fbc
bb300696313c34821b53b79f3afd40ab9a3149ff02fb1c9c365519bf21a06103bcfada7601de0d3c791148ef64a022d22f478fcf8e2ab1d853cc9f2f09bb88cbdec2d9d2dc5243c57c497c98437ef80c81517ce8fb33c00fe4f63c7ba784d247d951af651ff53462af780999230cbdb964a87c1eef716fea6f20aa655980c00da0e438e22c26c534b7ad8c3f62b91664b8d80cb0834232574ab8e1ca8d436e364a0efb9ddd2d77966b108eacdf67619a7e2ce7de0a9bd437bc5adbe769e01784ebca2142481c8ac0f68a0a0cc24aa602a9beda6134b296e3bbb422537fdb00e25649ea4fb55e63f8fdea9154cefc0d51173cc33f9d83ddabad659b0d951fe66dc2f6b59a6723b988b4ac440dcc126e7d73f7cacf754ab2cd0966392b6821442ea188eedbdf9af27009632ac96d4bc8dda11376f37f4d378182b49118a8b11a964823960cc09c388623ea5f44805237abcb990b694d40e9c729372458259b0137b3f838633927f679cdbcda1abdad6f90d0869524246a56931ecad8d4b1fee47d4117dda804cdd7f94c32fd0d7640f63f9137c5f124f543282935db35357607453493ffbee91012b63c81c19f6c732b99b2ea44aa6e74ff881b38fed19f5857b9bedec3cee375f08d080669694c55c14d1c7951fe906ac07c13e316f00e393cd5740b57f22f03a207b87a4563fd37f1b54aa03c9d94e19621fbc1c949fce6a04eb04a1c96a008ed79cde47fe8aef003d6d2dd186e5f85faa0a2b39def245d2f85ea04cb59083f4482ce852f03f3159df7c83e5971b147a66bafec1140f3e4b3ca95ced458dcdfc9fd0b932388dfbe426893d368c62030dca4c81b21e794dcbd4843d372c378942d7514447b1ffccf89d13983b414917463b97ef8b81acf7f3b850ac102cd56622290c85467372a606a2aa5011caf1a1701058aaed75ce6de458bd6bfd38bc5ad09cae2c07f03b598dda595083325e78627646af854b975a5081bfb246489a01d5653d90c5b16ca98f41c3767495ce9eb5038eea4c3863c3b01aa1a53e82eb6705d521fa2ce1948099f3c641f477f29c1bfa0f5f2f32fd2fd5c2e5a28b05e80e6899cb74f1193e5741cc4dc1d2b68294e8490bff93b587ad265fce489f361e0ef3a78708c1fc8f6ba7f14a3839dfc5bc08c328239dbadb4f0cf0637156aa6fd057c006582e9fd437791bbd1d533c443d405956f5bd4ff1fa5b5ab34f2551288c599f863125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$d23b2e7bb22b77fac9424b8a7164c559ca9e17d7ad59483d77f65412", "123456789012345678901234567890123456789012345678"}, 
{"$pfxng$256$32$2048$8$dbb81801e6cf5ffa$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e0408d02d1543dd98807502020800808203d8b216863698fe99b399c476c5647b54f8b8066c95a6422a64f77b3566fde10804ded41663f47a5bfc1877965be9b0713c1e6c9cf4add5ebae35a96682c0c49ba8fe271bb750351f46535ccbd0c3e7a854e9722ba59c7e8a9a74209da359b7ecac92895457ed1ee149dd7c6aa88af18b92ad63ed91769e1760a59c9a81de182192ba642748f7baf5cbd25e909d15d94a60fd5e2cc5676679a0e5dc1dd728c8001bf74c08800c87ee0c7d704775285802483eaab253e0240722bd21244f571a0c998a4234239503daf6b0f33555c8169a54db71d74318c2ed8135e97a7f60fca5e8876cb265db2a4ddcf7180396fd366195fe84f6cb75b292c1c104d3b9f3bfb5d5b17d9c68da934daa4b376a1ef9376f00d33735dadbc8adbcc90f674b0b8342a677f9e99327fc721e4105bc9fe4bc47365e44cc94fe32b5af8e354f204ce579bb24ecda08da219e8a9ddf8924762d57789d7782e06948d6acae25c0875fad8996a8bd7a916313a86854241b5fb600339b24ecfa865d7cb333f460022bad29b8be2869eb006efb68fb501301397ab395b7b2a6e42ab7bac644d24370c40fbb69a4c444b6b2f0abe1cb0f0913a884abd4826a50ab8f352455c71c8621a071028f7b0d1fb144f29cc4faf778c32f2491efcc80832b328352220b9bdb6232617c0d6484fc01b7c68f8e34a1928d88383d1be13294cc6c7c3b1b9b0babf33f00243f50c589a00a9a6ec77edc51450f66423972edf6ddc1f42d970c337d9c33bffcc8bca754df7bc7ac773d88140ad7645d0e34595bcd0f4091355f155f4aaee2e25753eabadcbb7a55ed94c4432baccabe837f4fc446f7ae6e669f25a3e1a3f1e3ea2b9c6f6ee81e33da7c9a73cc31dce52dab7da062e281cd2cc336a858e3760229b2d77f2555f93dce5f222968e5e30ae0dc1be0c7d71d5d31f40c2ea1ef4f1b429d9f33a3d415e2c482b324fdbb012bb4e479cf106e242f85201c6bc7ce1fbc521cb9ab47343e675ffc6b68d01bdfabb90c7b35106653f8753e3080033b4bedec0782f7ec54a51c74e489defd43d13a14fd4555e944f66e17e65f5f206edc68996e6282597618cc12de10b0c101ee6a18bcf81624d06529b203cadd9d41ba0c4d8e3598f7321b81503e2bf22026ba99183fbe4a8e37aa22e7dec1eda76ca6a834133f72441e147de0c878014cffb966044e6f7c1e20e444ea358cedd06a32849e12db583975f0d4bc6f2930da4bab30544a3018042a4876be03d4faddf69841545e9895289d5b
38b0bdfad43ff6fbcf9bce989f55a85eaf3fcb99d1cf79d61074bb9fa900733e2193650305b77c1342832da3652ce46f378b9cdad1ddabb9210f0ae399490e01b6ed7c60bfadd2c5c2bd534fac0ee83721c4253082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e0408740ffaf2a993ffd702020800048204c8730ed3dea5e39287ea1c441026026af0b7971511192405c0a5589ab7fe187dd5428186d039f6beb0b04ec891dd087dfcc7c0b7ac746d7019a95d12af486719ccecbb75ce990fedd592c7456803b7582e4e65bd73a0a9b14bfb61c2b333a9335a15d663f02467cb61878685d4966aaac9069cb2f9ff59e307d09bf687e46778aeef3f2a00d4185ad756390548f68c3f107f0ce8ab13bdaf37c18161cfeb919ece97dab44406a72ae1593b118fbc8652e8f18009b0d508f0afa32e332b781584fe0bbba0a00f059c684af00b62c0a6b37feaac9f903da425ecd3d81336115184aff97a3c12d13fe173ba602793176f9bb241ccf21faa782c3b8c05e4486e73e7d96eaec6eeca4eb92d9c9b0fb02e6f328adffae48a5f1485b7a19917ef88cd8d59730d77e180b30e1ac0d0a286488a1f6c4ecf1ee70c83627bd2ef4023749c30e8c7a0b610d0dd96656d68cfc315e6dc4a2f785d0908ce52bbe5c4adb4e25863f5186e1712304656d1d5a5bf1c0dd6f8e9b110069a46099099ad0e685034638c4c165de239d7a7ed9e9b5325f1bd9036be1ea29f61c33faba9a67d1c81112335d8d67d09341663a9f8a0ff5d4b30be4977b67dbba3c8363f550b5316e124aafe0e1e8c6a18421aa1af559bfc20fd4be1507e1bdb667fc7459450554d53ca7d9f134cbff96dbeeeb6778adade7f60d32d05e1ec2eb69dffca27568d2237ff9ab388d032d1531a461eb017c1245c401422a245655d799621e0fe0f4b7a9f019059f0d9476ceb25001d0fa34ec68e666b8dd49bd81c807685f4f4aed108535c21a8b926626557370ed996ea82d452fffb1f1f4a0a7740dcfa0c20d2f65fbdae0b39f4b3cbdc2b3c2e8b4ebbf03c1bed6e1506b61825ae854abcc196e79f20a06adad239c777715224c7be6b667e343759f41ac9d0e15db35c7a6cfebba1a5f8c3ca87baa8a9f7b3e1e729d4104b41d252242c98d9abf45ca040ca6a683f5a0a129e263507a4af614a3289e7f87d7e2d26ab917ab210540540326e9f4280c51df654dbe6fb4ce4c304ee5fef60afed4e532a051ec3c90c838881a338f6a8f535433a82c4e01d63c55dec0e9ddff46ac4374100a7a57103553c7de7822bda778515a6bfa185c1e6602c3abe485cd9d87a8aabda04619d9623b37bd59da209d71caefa
ad486ca4ed1759cd2114085054f10f302dbf32008ad6df2d3d39d61d95d0ca4693da7738f5d22f6bbf7789db8e51d7fe9bfcb1b40620537f10e737a5646d61ce5cdab7c389f8b26917afe4bd70b4055b9f383a84c019e1b8e14d089c8003bb129717626e796d7692fed085be0bdc8c52ede08271f52fe369846296892fb6c066c6987d41c89c0a48bbdc9f79604eb1288663ef90fa341c68ba68226a3a038dbda2056437b74ab211c78bef11c6e26171f65b63d931f7b7bb568950e0f74a5a75f00e8821a4f704ec75f37d50155afbf2f8afc6476575ae5bf91e99758a757d0d1f4e5016c1fca114667f79664636a2188ef45ab42b33acbbde837932c3c0835b8e30bbf16ce3bc2c60d7c73472ec19ca9dec9b84721acec2b5e20dc36a0e3d55bab225cb238aac074eedf3b56c3c4613ffb962b4b7e7a66ddb93fb7f7c72f110af40bdf3abf299567d4cfbda4829ab04c6d962df6eef92cd04e43ede0cbadba271652bcf5dd8a682b4782a2b9f222b514fbc2311901af9ef99d526c831c5ff479e5313cc9d21b482d83125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$91b12ed2d2621d03a08ff7e914224f8e27852ce491737d1f1e866b30017fdb04", "123456789012345678901234567890123456789012345678"}, {"$pfxng$384$48$2048$8$70e618ee66882d15$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e0408b6e1ed3f5e14435802020800808203d87a3aeb70dae99ba99e6420aa49c88e2f4e395257303a50201a89776e5456effc8ad07cad1314d69983ffd8e2d1009e0fc638721e3210811f1632673ea0e70974c52c398d4d1025c80384d979c9daa25fbcc58d539f09915e59d72683eadae739f81225b70e659b673d79d9c242fe5ca88ae9d7730782f5c2c6eaf81877acb7076722cf4af7a7a189a29498b8065417691484fa622d67076768fed288b211d6ab40bc231696ff7064e769f280ca0c873c56e2bbb28269f5ca040466479a05b8b472790ad352cc57f11045a746915fded11f09bc03697b9b1b8969b616a0e94c7a316850651948e49c9798e54208c44d7943d84ce7b9da3842ee5e42660db3515d07ec906fef365b26e5714f5c2b8be1af2dab09fd43ee3445c573817fb0db5b311da0628c9ecd038e61b1b4c2eaa5623232bcc2ba8ed6b7fcb3f7921bbc03ec22f5d55438e33582b002c9a5c1170fc189c3f2c9fddabd8bcd801cfb675fcfdffac676d74b59afe303180c06de76294b7d5282e73c31769b2516f404461b42d16482d8c9ea9bebfea79cd8519852fe0e17f98ab1b59bd1cf1b7
856e423b14bab00921f26b722cc82f8f4cadf310ed96a2196e34d3e31d21564276c6e671e4c3c953e5de6c0ce1d711b34489101b65fcc20c58ab635f89809045d3e3246f16f38af549e483dc43b1ce05b2785442a146db4a4ec20d305f5554eca0967ca1dc0e05f07190e1b27259a39bb21fa8d26f36ecfb3daebcafb66852c3064212813b64984777c7c86d6915f9e5172af479c4dcca085ba3ae71554d2d1ea5398f6746c94dcabf1e2feb916fc02394795c27fea5119932f9b4853ba7e68f7186d75115372b4284515bf42290ad4cd3486188ff55f44cfa92e800d564cdef01e58f532d804cdf4fbba4804099d053dbdee709c4036245a72eadca2e30f974102c0f793797117160f08993241aca7c45a45b70b8eaf0bf765b2d7b2fe357f368aef2e2eeba48c975a2e67b99475c5367e1555aa62447326e8e5fb46240f63f34bc4b7cd1b8b8063cbc855888c0bd7491fe55ae572fb6af2edf0396c9dbed2e2eaefb128432cc22e6b3dae21bc3a398a0b4c4b336e9fec0559463093c48aeb49dd74660b7e6a984cfc595324c39c4241093c57487a9c3c857ca8f402c7108c0cf8b8c7d4d5837c9603a9386b8e8628094e4b56ee64a31f10e216272d92423d141e8c5f0f27396c8f6a1531fc6f1c8af01f02c2292049bc6715058e4d6cdeac9f1fa8f46cef3332cb32c1690ec626c827cd4999946fd8bbcac778670c6ffe4e0bc066c17855c9d58b949837bbd299694d59f636125df796be5a456a417df4d9cc7313b15994ca3a7b05b9bbb35418c205120e6539d4707d48afe80deb5677923082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e04089ebadbc5dffe962d02020800048204c8ffa140237ce6d43a17a750c8775f75db75fe3daacc29e3711f5296cf58afef8d6f45fdb2606856e0f9401bb245aaf74f8ce09bf14e22eba1420c353a063ec3a36c3db8a82e2af71a00813d8b6e678955205bfe8e67784a8fda7309290efc81a59d905e140367ddf0c7440b3838ac0fb31b7d444a29be4a6158ebc3489cb5c354132c9691e9c293a0135efb3fa214bfea8047f26fa600c3be87ea40c204d3fc44f85a867090d87df9d4ae59c5799976dd5be257076c1078710747e23ceee395511f000a6539c79f88a16372393d721c6eea761fa63dada8f01f5b0bfa77aaa65a2f02f161639b1409fa410601f4f5366418300ecd897dccdd14398555d83c88a8c8e398248ed3cde898e2c24691b664dac91adff95700329ffa4c58f628a470f72a26b4eb33b62b96127f3891926771dbcbabeaace79761769819a81239b1f1b85622eb668e399cd25f9a920827b9cc4f6fc9b
8991078012b543fba570d14f4eb0da187b60dce0f6dee8a6cc6cd37fccf4b6d78074412f8a871ae21788178ec5787ff7365d9bba1728b91f42dcb05d7560481d1e433ebf0d0b50829a2960ac253de5e9d42016be3c1365478a51d8b134b9ffec742319bbd5b9e6ed40d4e682862270c35467446fa79a39663fca2b9fe507ae038f6315b423f73d76a2a24950500b68c3cb7d251978cca60bc3f33686893940d6fa5c5a8750252f25964e5c59fb52de6d31cce93e90b44e3ae1ae7421a89514de075177101c37e43567f01c8232aca5a09fd628e3d5a6d5dd5d9a7a273cb55f842fae283ae8e79a94c00f28fdb09dbdeea8246740a5107f2b0a22f5adc2cd42e441d21ddde3218b403f3e3409bd2d90a02477f96195daa06a109da182dc1b8d77fbd200bdb62d86e1858d01905ea38a8790c57977355445ffc380eedbc042fb72726763fe8959ae5031f5b4583cff3baf3829b6edf74a7a235bde69103e5d4e8591e14c5a3524308a0d1f4c832aee8f1b2c1019fad0fcff80d4e73676a26f203fd56a4e5607668896feee7ad74e7e10e2fe699b3b6e9cb119c5ca3c69c352c3fb8644db60fc6abccc44836ccbc161d0a2c750c3ba66695a559c0171cdbf2ca6544ccdf247eadd377ff52b6ef3a375652bd9743929c0d839aff5778dfe485219f294dc85b582feceacf466b2a82e785311db4c24a201be15d107af3a1e2a51d9e72a65f6a13b9e04fffe9c50008c4569fcdc9cc7af9858b138189df64006c8702fc8b1866ddef8dc52654fecc889e95bd465f7a6cf6472a09c9c2b8fc1e7bfcb7a8f480123b6fd3244f647cb9b316fb5a5570bebab4d4565152483798607feb07705fda5c7e54d819743dba0c394a928f7837f859cb7ed58551ab7a2f8e6f1d4e05754aff9f2299c24228005f63aeef34653c72b84ddee78b527411b395c5c0f36dc4595eacd737c38f73208943951da23ff3e9f9a7445322ead5d54307adc7965c209d332a24c34305f17638e29297c1f0b8dcf43bfcaa20d6b7b2e106c2aebdda28a7989aeb5b762947d4491ef6cbafffdc79347183174dbe0eae122a9ff35aa2a8aebd94231ee53d9d6f7907e797efbc9de2048ddefb87448ec783583f38fffb4e0e07eeccf9da2d089540845b371e62288543acf21a98b1372a43a90774269bd6032b0a1447b894ab8e2a589636e79c46fbb982bdbe2953983b35e6b378326cc5c792f7a834e0c400d8f34d31492313a32c83f9a53125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$ccae69b9fef6124144ae9f566041fe717bbfa2f167b6712ec84233e6647a205dc1441a996defa8fed31b213ce517919d", 
"123456789012345678901234567890123456789012345678"}, {"$pfxng$512$64$2048$8$396f32bb64a2fa5b$308209683082041f06092a864886f70d010706a08204103082040c0201003082040506092a864886f70d010701301c060a2a864886f70d010c0106300e0408e89d3e68fd0b95b002020800808203d83f879631d938c0b6b627606a85aac3395102d8dc33544ca92f1b9d64f9bcafda920018221f1772883080e7c93dea9d1c719dd4d87e6e713573aafbb0d18b9ba326c3b3a9a14a92428cd5993c1e4cd8af3162eaef4117a6a83e464d509e1ac6dd77dc5df1d8fb4523459800fda94a261fccbfdd49fd93f2920e7e5510e111c39d35223afb19bffa39d1148f810c41a1cac93f736d3d62b202426b6dcd06c1649742eba0dae9ee2711af32b1755b6ecf7ed7633b7aa84f34281368bd509e492ea9650ea3c234726184775bf059a5acb51d407d29e09b6c3850771c4565bf5b3a4f91c2132c835249c5f4c1430dd583bcefeafcabf33bcc4b17b1b813fac43841db8bad2c186799c240673be19dcc06abb777d8cd092f96be2bbf61138f7cc86f5f636cbcd6f80e39049a28f8699f79e5e22d24a589cdd8c06aa0596b6fb1ad8744c05ccd4021aa58991bdd7fb3545b93571ab34f07681107c4fe2ecc5b81301987ced0cdd7edb249572ccb3065d58284b15fe93560b48e83c6cb082a6f7ea34ae43e95776920618f6726a8f9faca2d65fb4aca594d0cc11b78b44a5725456ac976d22de3ed81d336cce0b6dd6e944f1cb930a82ab66eda9419472196c99bedd8148c386ca9b692f0154a328939d7fcccd4dc97c6cdf740e6e05c942187eaffa11cce08f25ffab2ba25d48a3a87cf0049e62ca5bc774a11dd6c7bbd713206466f4bc1264b2102e8a82416ed31b121d0ee08d2dd8cea305cce65b9500f116de912e02c553470d02cd026f99ee78606bb577fe586f40321edb6f18858978ff183653fa4cb22e0436f7e2c867522da74f601f782fab519942dc1418101b5961c45752ac6ab2eec516af19fa78ccf4d9e8906b6d4000b8f68ebab77360138fc7315d205693a6adf7cc983a094997053df0ba1e9bcbe39b3996fabfc5e7749d887f504a7bf8be40d7497041a63afb323fb9a118dcbf22bf55f688a110dc7716e54450183855600de72b5974c27d07726f69dca17e7c003bef61e3d3120611966d4b6190688ec81ee99548b302e4b8b82437a53b66eee98da497b1e859dd9d7a02eec5121deb04814fd588c3ce2c0d9f35140951a78b6f4c2507fe27dde55c0e879713729ca5ef4279f7068f4f90063f51afced5a0d47228e8085191fe0af7a7d0b78b0db0697b4024015787199eb314328535520dc99e902e4755794d614d9eb33f51d35889086baec90699fc7282
7970f1aa27d097525c074da78075c57613f4a25957115b010f2264cb2d6772c1372cb66f6a7298059da4149bd2e314b0fee857e13fcd001f2284c4cdc14648f502480b97ff917e97201f1b15380ec6e27a588136b80b89142f1c19b38aafe1455462b70cc7499bdd3a35e5cbadc3082054106092a864886f70d010701a08205320482052e3082052a30820526060b2a864886f70d010c0a0102a08204ee308204ea301c060a2a864886f70d010c0103300e04081fb645183f2c3f5702020800048204c8ff64eb11712a0e513e8de34f5185c4b896d5ce774776983f670f182454ac3e77012d9520ea751cf81e1020dc8aa81b71b6f1865c105c5937d783c03f924823d858d2dd414f9fe83472b61d727b25e4b82ea1d0bf6be385ca8fc36d0faefadf2fbea9ec05b4ffbbd9f4564175107515d14a573c5b849a60ea421e1b53a5c16b3d9092bbf7eba493f29516d8a92b3dd84fad3a099df006ec81736d438d673f0f8bb97b83b7a0d58e2d2e63bc48ee412e21c589f392c7a52d6cce44c39344b9ab293d6b86f815399c6554aa2712b83f0ee39b0a144c2d64ac29f8e9fd96db4446d8fa144af3ac54367b46bf9ce27c1f36242499006fc91f9a5b840678e754309111aa4af0172449b15c42481ab6c85cf9cddfec885474d81ee559e134c07485ad8db0500de8472379a43179fbcae9c24f8c2602a89199164d0609e5794c467524c49e1b162a23e93e780490ba80e39505766809c4d2ec198557a98b0917a2bec6a5a5640a42a0dac2057b9e1c04a897c0ecef2a145bdab75032f2d73ca2082392aa952770cb877db6092dd52dadd1f741267ef60484804c312d582d23987e6fed733c9f5b7a1473e83bf1f92962ec78ece29a9ff5fd3f078ef0ad8898f4a3718691ac89abe091d5c9f47fd4c67f679d8952fc599be43a7513e8ff372eb0f9fba12045ea3f88bad0c6c535fdcd4fb905e125a7669283f65feeeb736e32ad0c71cbc38bce567188e85aa40f82da2c7bfac5608d60876d4055efcebfade47e281340d15a2d84f5f03e61196f549a8ae454e23ccc95a940ef875b5786b71827b95dbdc158b811cafcf84df33c3dbd38a197c7dd7c1db12cfbd7d564c5beb3337dfeb3e56d5e2151eb082763181ea0d4a383abb6b899859a2292e4dfd07795bcb5951abbf1dcb5125ae26f08ac210fd94ee2ececddf54bc5ec1b1176d19f6a2c4a8ad9d910537ef4149ea34e57bc5beebff2586763156d5df6c1030bd7d72318cadb0e0143462add1e2f7cd2cfe4e9ebf1dba4f3b352772b0b1225a7c8befb18ca93392452ccebe4b98fddfe27e0c7b01ad1730ad845cb8230a041e24a95802d0ef543e742360e2b6d9a26202e59cb4ba5f18e60000252c67074ded0c3bc178609e997ef24fa01abd
35d3f0042a0721787ae895d3637641e2da9a2073dd2499e805d785441c248b8a8234655083700bdc58c8fb80b8c271deb4a81eacf331473f060d39bc9178b7dd4e6dbd06ad9a9b06a5c955b144bc40beeee9118fbd6128f7366f72a20a042864187195075ff51a4e56ebbf61cd0f079814b5e58fe854bcff79b1e6179a3838a704455bfe8e0a49cd98efaf0184c43b0b05d8e9c6cc60e1a7567238db973d2105fa690dcedbab2f9e8bfa706c4aa159b8e233e02c03ddfaec39ec9618a5bc1186997d01f82d38c3066e6a55aaee186e87b2cb144d3769ee436320aa54efb7259c6f5647bb31840b22e900e141a896aac5403547ad309e7933ccffbcab050e0bbbc6b7f0ca0829216df0c2792fcd1e6945cea18ee33ea3913593192c1868d3e339bef11c77ccad7c3de22711090d042bc0175cca415ef92de7b8d580afe0bdbc1a381ee337973994d098a51fc38fb175245d18cbb5510fa2d2b565a38dfc1ef0a9c2ab9fd24388b18bf90304f599fe92fd64324bd534a0a7c1c32b2c4f29653e0fb66e665c1f4c1b0dddcc0ad561fb3604e000aeb031dc04d449fe44ed0f4af82c7d191dc3125302306092a864886f70d01091531160414f60526b8d97f1ae083d90c655dcd7e0458947577$84b8b3b5751ba689b87acdded46edad28a580116c4f2689b6da009aaddab237ad542ffc337378cd6b938a1c1d5b60af582681513196ca8d97416d83e4bf18a99", "123456789012345678901234567890123456789012345678"}, // from CMIYC 2013 contest 
{"$pfxng$1$20$2000$20$1185a6aa96733b746ea9c46c3665383e3d3f6bbe$3082099f3082060806092a864886f70d010701a08205f9048205f5308205f1308205ed060b2a864886f70d010c0a0102a08204fe308204fa301c060a2a864886f70d010c0103300e04082e3f6c77f0ce6a81020207d0048204d89002593d2dedda947a1f735474371b7a1079f5b94a672aa9e72c4e7bac6a8391790dc39b367bb8e788a2b523decb93847ffd1c5b2aae71f3f013df06b4227dc6f6c5105b28a1dd7a6dcc0c3fd656d5e41f450d0287bedd6fcefcee2d9f7c78fa12e52b918d055b50bf9b46a74b91e651162382cecdc2c669c6be12bcaca2d3fe93882a2223bcf1d9211d30af57dd52c9a5fbc9919cbb0781216c4205d5e1c6b5d164cea6e144db030377c1d47e3f1aedb03817fdf8b02a5311228f96f3c55ffda3f08e4f712c6574777c8fffbbf566ff3518ff159c2493b53adc87928724bebd07abbcf5256446f75041af515a4bf4544a4987cf03c19deafe66030e80925639b2bd3bcf56cea087fc6f56cfefd642678580b212a5f73ac3050dd1102f3fbf3327b95310f2c65ea3c5f792e14e5a75ed4876eb3a6f214120af5a57a2246f01b8ab0318ae8c5082d06e0e4d6e3c27f32b84a968a495cceb85cf6eb454f74528b1d4b9e588598fc60d89152a34e885930abe0dafaad2d9cb1b59597d48f525df8562157719f3eeb3d06967c0c0033f4d93aaa5386a568f0b611b5d4234a823a90e614eadf717ced18ff4fccb800969749fd08ccd3f7031e5c95e17113b8ad54ec92d1ac15ce32d09ea146dbc41235555c85573eebfcf2989595e5a0308a1cfa1152abfdd41018fe844771ac5cbd30eaaa1e041886e034d034134dfe2304bbc3c5d739dc6e5a8ca2d9b5ebc4ea27dd79138539b9dd8adc2d253edc359a071883b39d95e03d2b237e57e266ed44ad9f8ca404e3893ede20373e0009aa32125fe608c9832e3728b9a394a461d24bc92750bf5e81f93239793efef8b65c6d6200ec59299554ebf8602a0d63ef777dfd442da189e2e5b1cee22a80ef654e71f56c4ad637b93b7c3ddefaa43da143d84262d9f99bb1a976c0f61e41a9c4c60b8212501fb1bdd5754c1f36e75afc32ca011a7cfaa7f58b2d4d4d1911f530c47fc744c75c3606e1b8e862cabfd1b000a4494df6eaea424b77ef6436edf3d42462d7a1c27a9d9a87ef4dc2c17d98c07229bdc76209cfbd456dad96341807a06abcb2132a444020d9ee83d213e50188e998f5a3339c9be2a401fa67639fb0a9baf48f5ef6092a6540449faa0894c165c54a776cf32a7c0dfa324cd09e76514f938061353cc609a7ef337b4eb6357537d8c4862cc53b4c71ffebd5e9e78aedffa6098e249ba5078b6e113ff24d9cdba5fe5f0a03a3
76e3a2f2a72caafd92437c2ef2a08564ff4bf2f1f6ae33f55c2732ffff806e4578e51e11d848dfba53e3d13c90b32c6a3fe436c202712e3f770eb89bd4c14fca4b1424f14f8f1b71e46c1c17e97096e4a5b8654b33c5865716e49ffccced21367a3ffea2e98c57398247665da064c3825fbb7fb84eca71bd313fbf3f9144aeb7841dba9c51b3ebc5d6539dafc1e6dee0fb55634e5ca336e8ca952e6f63b98897b1b9855c2f863106133e0892b2f5fdb02822438c6836b3089253f1d86ffabcf4c4e641aac8fb0dcec7fd9f27646b7269a71cd3fa5133d21e84ac0ed733d22378810b7258ec68f60d8c70b55960c61c8b98772b93c848403dd7a26c506c71138b676d2824d7b58ff85d88243f6cc5a5809813390ffcb67e01cc404cc11bf06fb974c81216ffe84af66a9735ef04a39061f0cb92ea9c0aa01dcf8478ccc70bd2c34e741e361de47180e31e1923cbd90856186c80a0ec8f66790e0205c0ea9254633807e412d7606b593773181db301306092a864886f70d0109153106040401000000305706092a864886f70d010914314a1e4800640066006500390065006100390038002d0062006300620062002d0034006400620034002d0038003200360063002d003200360062003100350061003500630036003600390035306b06092b0601040182371101315e1e5c004d006900630072006f0073006f0066007400200045006e00680061006e006300650064002000430072007900700074006f0067007200610070006800690063002000500072006f00760069006400650072002000760031002e00303082038f06092a864886f70d010706a08203803082037c0201003082037506092a864886f70d010701301c060a2a864886f70d010c0106300e04087a1638971f7941bf020207d080820348b9b8f734e0cdd2dbe86ec68a01904743c26bb49361a41248c21a887e424dc7571cd3d3cbd40b76a9215a60cd15c785ea427c9858f0d9c92e0a3909901e24210972f3efe5b8ef8994c47e51fdddcfb9da32e464a1675117fc564fb6c419cb49856530fcd2e82dce60ffa42bd5d8edc3edeef687ad49ac4cd0caf5b7248a77d354427156940f1874712f7bf9cdb823aede82ff144dcd6b8cc37cc14cb8f1b8077a95389115e5acfcaee06e68c5118c4cb7a9dfdf3008c834b6a5ad77a41761ff08db75eb31215ecf43ebdb634186c611a145ed4478bb2e0768c536389c88b737001a28e0348a97447ae8a2c2fe8301ab26a04032f44637395e2fca2f5bd92b77d4dff19d76c36c104a9bf8fffbe610fcaa14b3438ba8016a4baf5b4cd3ff22f400cc7aaccb6aed14431f2310609e57faf34fa827a9bd78264058190d93c3641ed8422c5d65576c6d5d1853e20c9fad3587dd1343659c4
1330b377218d1c63f48718d2f0e7cfc89100c9bb57b843d5af2a4f98c0d88d612a33a5f0ac4f2de31fe448b0ed820a4b92b5bce49f2bcf45bef8e9ec3b83de002016fbd82a350cba0bb75824ab4d9542fc87043fb57ec2970f9ca813563aa3783f70bc4b327a728094d05d8e4fd85cfcab8342fdcbcc30ba9fc77d3261c869543e1d9259ec9efb30c07a11f96bace1ba76612c70c141d0a2aa823b79fd7af7cf6a4e8f8e407404b3942ea51d90388e26c5a83e62612c597fc3a79d7d778a5963bdb81f41da84f0977b350dc4f065d5c29f11cc1cd053091f212b2477ace4ce7bf07002eb48d08d54da229d63c040ee106753b0b661fb08a013868a0f891eb3624f6204ad30afaaf05686977478040bba6473ba9d166ec8a78c8b7ef2cd23795d3dbc0919f46570b4c1326afb9641663bb85153df34585b3f6e679651e08192c364079d250afbf021789c1dfdb49e01bef9fccbf2e31ab61d1b4626b7274716756a74a76dcd4d1878b36abe52fe2b8ace82ed733b1e5cc44030ffe58bcefdc48e010f489107b888b07b6edcbf559558e11a49310cab84d15aa492ff8774815b47fb0a684201d01a7fcb4058c3545a56e1877b772b547fbb9fb5174e00679dda61601905b3d22eda8baf8dbfdefe14a532ceef05bfc736cc614ff12c789efdc3fa71c8432ad58e3c9ed4585a87be06719a02e79$41e3ecfdb9dc0bc6e36028e47caf444bce1e7ee9", "Sw3at3r!"}, {NULL} }; static struct custom_salt { int mac_algo; int key_length; int iteration_count; int saltlen; unsigned char salt[20]; int data_length; unsigned char data[MAX_DATA_LENGTH]; } *cur_salt; /* not quite sure why this has to be PTL+2, but if it is not it will only find max_lengh-1 passwords! 
*/
/* Candidate passwords, their lengths, and the computed HMAC for each index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 2];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Allocate per-candidate buffers, scaling key counts for OpenMP threads. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	static int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

/* Release the buffers allocated in init(), in reverse order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/*
 * Validate a "$pfxng$mac$keylen$iter$saltlen$salt$data$hmac" ciphertext.
 * Returns 1 when every field parses and is in range, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext, *ctcopy, *keeptr, *p2;
	int mac_algo, saltlen, hashhex, extra;

	if (strncasecmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH))
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL) // mac_algo
		goto bail;
	if (!isdec(p))
		goto bail;
	mac_algo = atoi(p);
	//if (mac_algo == 0)
	//	hashhex = 40; // for sha0 (Note, not handled by ans1crypt.py)
	if (mac_algo == 1) // 1 -> SHA1, 256 -> SHA256
		hashhex = 40; // hashhex is length of hex string of hash.
	// else if (mac_algo == 2) // mdc2 (Note, not handled by ans1crypt.py)
	//	hashhex = 32;
	// else if (mac_algo == 4) // md4 (Note, not handled by ans1crypt.py)
	//	hashhex = 32;
	// else if (mac_algo == 5) //md5 (Note, not handled by ans1crypt.py)
	//	hashhex = 32;
	// else if (mac_algo == 160) //ripemd160 (Note, not handled by ans1crypt.py)
	//	hashhex = 40;
	else if (mac_algo == 224)
		hashhex = 56;
	else if (mac_algo == 256)
		hashhex = 64;
	else if (mac_algo == 384)
		hashhex = 96;
	else if (mac_algo == 512)
		hashhex = 128;
	else
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // key_length
		goto bail;
	if (!isdec(p))
		goto bail;
	/* key length must equal the digest size (hashhex is in hex chars). */
	if (atoi(p) != (hashhex>>1))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // iteration_count
		goto bail;
	if (!isdec(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // saltlen
		goto bail;
	if (!isdec(p))
		goto bail;
	saltlen = atoi(p);
	/* salt buffer in custom_salt is fixed at 20 bytes */
	if (saltlen > 20)
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto bail;
	if (hexlenl(p, &extra) > saltlen * 2 || extra)
		goto bail;
	if (!ishexlc(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // data
		goto bail;
	if (hexlenl(p, &extra) > MAX_DATA_LENGTH * 2 || extra)
		goto bail;
	if (!ishexlc(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // stored_hmac (not stored in salt)
		goto bail;
	if (hexlenl(p, &extra) != hashhex || extra)
		goto bail;
	/* the stored hmac must be the final '$'-field of the ciphertext */
	p2 = strrchr(ciphertext, '$');
	if (!p2)
		goto bail;
	++p2;
	if (strcmp(p, p2))
		goto bail;
	MEM_FREE(keeptr);
	return 1;

bail:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse an already-validated ciphertext into a custom_salt.
 * Returns a pointer to a static struct (overwritten on each call).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p = ciphertext, *ctcopy, *keeptr;

	memset(&cs, 0, sizeof(cs));
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.mac_algo = atoi(p);
	p = strtokm(NULL, "$");
	cs.key_length = atoi(p);
	p = strtokm(NULL, "$");
	cs.iteration_count = atoi(p);
	p = strtokm(NULL, "$");
	cs.saltlen = atoi(p);
	p = strtokm(NULL, "$");
	for(i = 0; i < cs.saltlen; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) |
			atoi16[ARCH_INDEX(p[2*i+1])];
	p = strtokm(NULL, "$");
	/* NOTE(review): hexlenl(p, 0) passes a literal 0 where valid() passes
	   &extra -- confirm hexlenl accepts a null pointer for that argument. */
	cs.data_length = hexlenl(p, 0) / 2;
	for(i = 0; i < cs.data_length; i++)
		cs.data[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) |
			atoi16[ARCH_INDEX(p[2*i+1])];
	p = strtokm(NULL, "$");
	MEM_FREE(keeptr);
	return (void *)&cs;
}

// we only grab first 20 bytes of the hash, but that is 'good enough'.
// it makes a lot of other coding more simple.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	int i;
	char *p;

	/* valid() guarantees the last '$'-field is the stored hmac hex */
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE && *p; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/*
 * Derive the PKCS#12 MAC key for each candidate password and HMAC the
 * PFX data with it; results go to crypt_out[].  When SIMD is available
 * candidates are processed in groups of SSE_GROUP_SZ_* per iteration.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int index;
	const int count = *pcount;
	int inc = 1;

#if defined(SIMD_COEF_32)
	if (cur_salt->mac_algo == 1)
		inc = SSE_GROUP_SZ_SHA1;
	else if (cur_salt->mac_algo == 256 || cur_salt->mac_algo == 224)
		inc = SSE_GROUP_SZ_SHA256;
#if defined(SIMD_COEF_64)
	else if (cur_salt->mac_algo == 512 || cur_salt->mac_algo == 384)
		inc = SSE_GROUP_SZ_SHA512;
#endif
#endif

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += inc) {
#if !defined(SIMD_COEF_32)
		/* scalar path: one candidate per iteration */
		if (cur_salt->mac_algo == 1) {
			unsigned char mackey[20];
			int mackeylen = cur_salt->key_length;
			pkcs12_pbe_derive_key(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY,
				(unsigned char*)saved_key[index], saved_len[index],
				cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
			hmac_sha1(mackey, mackeylen, cur_salt->data,
				cur_salt->data_length,
				(unsigned char*)crypt_out[index], BINARY_SIZE);
		} else if (cur_salt->mac_algo == 256) {
			unsigned char mackey[32];
			int mackeylen = cur_salt->key_length;
			pkcs12_pbe_derive_key(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY,
				(unsigned char*)saved_key[index], saved_len[index],
				cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
			hmac_sha256(mackey, mackeylen, cur_salt->data,
				cur_salt->data_length,
				(unsigned char*)crypt_out[index], BINARY_SIZE);
		} else if (cur_salt->mac_algo == 512) {
			unsigned char mackey[64];
			int mackeylen = cur_salt->key_length;
			pkcs12_pbe_derive_key(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY,
				(unsigned char*)saved_key[index], saved_len[index],
				cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
			hmac_sha512(mackey, mackeylen, cur_salt->data,
				cur_salt->data_length,
				(unsigned char*)crypt_out[index], BINARY_SIZE);
		} else if (cur_salt->mac_algo == 224) {
			unsigned char mackey[32];
			int mackeylen = cur_salt->key_length;
			pkcs12_pbe_derive_key(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY,
				(unsigned char*)saved_key[index], saved_len[index],
				cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
			hmac_sha224(mackey, mackeylen, cur_salt->data,
				cur_salt->data_length,
				(unsigned char*)crypt_out[index], BINARY_SIZE);
		} else if (cur_salt->mac_algo == 384) {
			unsigned char mackey[64];
			int mackeylen = cur_salt->key_length;
			pkcs12_pbe_derive_key(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY,
				(unsigned char*)saved_key[index], saved_len[index],
				cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
			hmac_sha384(mackey, mackeylen, cur_salt->data,
				cur_salt->data_length,
				(unsigned char*)crypt_out[index], BINARY_SIZE);
		}
#else
		/* SIMD path: derive a whole group of MAC keys at once, then
		   HMAC each candidate's result individually. */
		if (cur_salt->mac_algo == 1) {
			unsigned char *mackey[SSE_GROUP_SZ_SHA1],
				real_keys[SSE_GROUP_SZ_SHA1][20];
			const unsigned char *keys[SSE_GROUP_SZ_SHA1];
			int mackeylen = cur_salt->key_length, j;
			size_t lens[SSE_GROUP_SZ_SHA1];
			for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
				mackey[j] = real_keys[j];
				lens[j] = saved_len[index+j];
				keys[j] = (const unsigned char*)(saved_key[index+j]);
			}
			pkcs12_pbe_derive_key_simd(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys, lens,
				cur_salt->salt, cur_salt->saltlen,
				mackey, mackeylen);
			for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
				hmac_sha1(mackey[j], mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
		} else if (cur_salt->mac_algo == 256) {
			unsigned char *mackey[SSE_GROUP_SZ_SHA256],
				real_keys[SSE_GROUP_SZ_SHA256][32];
			const unsigned char *keys[SSE_GROUP_SZ_SHA256];
			int mackeylen = cur_salt->key_length, j;
			size_t lens[SSE_GROUP_SZ_SHA256];
			for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
				mackey[j] = real_keys[j];
				lens[j] = saved_len[index+j];
				keys[j] = (const unsigned char*)(saved_key[index+j]);
			}
			pkcs12_pbe_derive_key_simd(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys, lens,
				cur_salt->salt, cur_salt->saltlen,
				mackey, mackeylen);
			for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
				hmac_sha256(mackey[j], mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
		} else if (cur_salt->mac_algo == 512) {
#if defined(SIMD_COEF_64)
			unsigned char *mackey[SSE_GROUP_SZ_SHA512],
				real_keys[SSE_GROUP_SZ_SHA512][64];
			const unsigned char *keys[SSE_GROUP_SZ_SHA512];
			int mackeylen = cur_salt->key_length, j;
			size_t lens[SSE_GROUP_SZ_SHA512];
			for (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {
				mackey[j] = real_keys[j];
				lens[j] = saved_len[index+j];
				keys[j] = (const unsigned char*)(saved_key[index+j]);
			}
			pkcs12_pbe_derive_key_simd(cur_salt->mac_algo,
				cur_salt->iteration_count,
				MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys, lens,
				cur_salt->salt, cur_salt->saltlen,
				mackey, mackeylen);
			for (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {
				hmac_sha512(mackey[j], mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
#else
			/* no 64-bit SIMD: fall back to scalar derivation, but keep
			   the group stride so indexing stays consistent. */
			int j;
			for (j = 0; j < inc; ++j) {
				unsigned char mackey[64];
				int mackeylen = cur_salt->key_length;
				pkcs12_pbe_derive_key(512, cur_salt->iteration_count,
					MBEDTLS_PKCS12_DERIVE_MAC_KEY,
					(unsigned char*)saved_key[index+j], saved_len[index+j],
					cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
				hmac_sha512(mackey, mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
#endif
		} else if (cur_salt->mac_algo == 224) {
			int j;
			for (j = 0; j < inc; ++j) {
				unsigned char mackey[32];
				int mackeylen = cur_salt->key_length;
				pkcs12_pbe_derive_key(cur_salt->mac_algo,
					cur_salt->iteration_count,
					MBEDTLS_PKCS12_DERIVE_MAC_KEY,
					(unsigned char*)saved_key[index+j], saved_len[index+j],
					cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
				hmac_sha224(mackey, mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
		} else if (cur_salt->mac_algo == 384) {
			int j;
			for (j = 0; j < inc; ++j) {
				unsigned char mackey[64];
				int mackeylen = cur_salt->key_length;
				pkcs12_pbe_derive_key(cur_salt->mac_algo,
					cur_salt->iteration_count,
					MBEDTLS_PKCS12_DERIVE_MAC_KEY,
					(unsigned char*)saved_key[index+j], saved_len[index+j],
					cur_salt->salt, cur_salt->saltlen, mackey, mackeylen);
				hmac_sha384(mackey, mackeylen, cur_salt->data,
					cur_salt->data_length,
					(unsigned char*)crypt_out[index+j], BINARY_SIZE);
			}
		}
#endif
	}
	return count;
}

/* NOTE(review): this compares ARCH_SIZE bytes (pointer width), not
   BINARY_SIZE; confirm that is intended as a cheap partial compare
   (cmp_one below does the full BINARY_SIZE comparison). */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void set_key(char *key, int index)
{
	saved_len[index] =
		strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* report the MAC algorithm id as a tunable cost value */
static unsigned int get_mac_type(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->mac_algo;
}

struct fmt_main fmt_pfx_ng = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"mac-type",
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			get_mac_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
parallelReadTiff.c
//#include "tiffio.h" #include <stdio.h> #include <stdint.h> #include "tiffio.h" #include "omp.h" #include "mex.h" //mex -v COPTIMFLAGS="-O3 -fwrapv -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff /clusterfs/fiona/matthewmueller/parallelTiffTesting/main.c //mex COMPFLAGS='$COMPFLAGS /openmp' '-IC:\Program Files (x86)\tiff\include\' '-LC:\Program Files (x86)\tiff\lib\' -ltiffd.lib C:\Users\Matt\Documents\parallelTiff\main.cpp void DummyHandler(const char* module, const char* fmt, va_list ap) { // ignore errors and warnings } void* mallocDynamic(uint64_t x, uint64_t bits){ switch(bits){ case 8: return malloc(x*sizeof(uint8_t)); case 16: return malloc(x*sizeof(uint16_t)); case 32: return malloc(x*sizeof(float)); case 64: return malloc(x*sizeof(double)); default: printf("Image is not 8/16 bit, single, or double. Using single."); return malloc(x*sizeof(float)); } } void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize){ int32_t numWorkers = omp_get_max_threads(); int32_t batchSize = (z-1)/numWorkers+1; int32_t w; uint8_t err = 0; char errString[10000]; #pragma omp parallel for for(w = 0; w < numWorkers; w++){ TIFF* tif = TIFFOpen(fileName, "r"); if(!tif){ #pragma omp critical { err = 1; sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); } } void* buffer = mallocDynamic(x*stripSize, bits); for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ if(dir>=z+startSlice || err) break; uint8_t counter = 0; while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){ printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n",w,fileName,dir,counter+1); counter++; } for (int64_t i = 0; i*stripSize < y; i++) { //loading the data into a buffer switch(bits){ case 8: // Map Values to flip x and y for MATLAB TIFFReadEncodedStrip(tif, i,(uint8_t*)buffer, stripSize*x*(bits/8)); for(int64_t k = 0; k < stripSize; k++){ if((k+(i*stripSize)) >= y) break; for(int64_t j = 0; j < x; j++){ ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j+(k*x)]; } } break; case 16: // Map Values to flip x and y for MATLAB TIFFReadEncodedStrip(tif, i,(uint16_t*)buffer, stripSize*x*(bits/8)); for(int64_t k = 0; k < stripSize; k++){ if((k+(i*stripSize)) >= y) break; for(int64_t j = 0; j < x; j++){ ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j+(k*x)]; } } break; case 32: // Map Values to flip x and y for MATLAB TIFFReadEncodedStrip(tif, i,(float*)buffer, stripSize*x*(bits/8)); for(int64_t k = 0; k < stripSize; k++){ if((k+(i*stripSize)) >= y) break; for(int64_t j = 0; j < x; j++){ ((float*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((float*)buffer)[j+(k*x)]; } } break; case 64: // Map Values to flip x and y for MATLAB TIFFReadEncodedStrip(tif, i,(double*)buffer, stripSize*x*(bits/8)); for(int64_t k = 0; k < stripSize; k++){ if((k+(i*stripSize)) >= y) break; for(int64_t j = 0; j < x; j++){ ((double*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((double*)buffer)[j+(k*x)]; } } break; } } } free(buffer); TIFFClose(tif); } if(err) mexErrMsgIdAndTxt("tiff:threadError",errString); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { char* fileName = mxArrayToString(prhs[0]); TIFFSetWarningHandler(DummyHandler); TIFF* tif = TIFFOpen(fileName, "r"); if(!tif) mexErrMsgIdAndTxt("tiff:inputError","File \"%s\" cannot be opened",fileName); uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0; TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y); 
if(nrhs == 1){ uint16_t s = 0, m = 0, t = 1; while(TIFFSetDirectory(tif,t)){ s = t; t *= 8; if(s > t){ t = 65535; printf("Number of slices > 32768"); break; } } while(s != t){ m = (s+t+1)/2; if(TIFFSetDirectory(tif,m)){ s = m; } else{ if(m > 0) t = m-1; else t = m; } } z = s+1; } else{ if(mxGetN(prhs[1]) != 2){ mexErrMsgIdAndTxt("tiff:inputError","Input range is not 2"); } else{ startSlice = (uint64_t)*(mxGetPr(prhs[1]))-1; z = (uint64_t)*((mxGetPr(prhs[1])+1))-startSlice; if (!TIFFSetDirectory(tif,startSlice+z-1) || !TIFFSetDirectory(tif,startSlice)){ mexErrMsgIdAndTxt("tiff:rangeOutOfBound","Range is out of bounds"); } } } TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits); uint64_t stripSize = 1; TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize); TIFFClose(tif); uint64_t dim[3]; dim[0] = y; dim[1] = x; dim[2] = z; if(bits == 8){ plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize); } else if(bits == 16){ plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize); } else if(bits == 32){ plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); float* tiff = (float*)mxGetPr(plhs[0]); readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize); } else if(bits == 64){ plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); double* tiff = (double*)mxGetPr(plhs[0]); readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize); } else{ mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); } }
DRB081-func-arg-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A function argument passed by value should be private inside the function. Variable i is read only. */ #include "omprace.h" #include <omp.h> #include<stdio.h> #include<assert.h> /* argument pass-by-value */ void f1(int q) { q += 1; } int main() { omprace_init(); int i=0; #pragma omp parallel { f1(i); } assert (i==0); printf ("i=%d\n",i); omprace_fini(); return 0; }
threading.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for
 * license information.
 */
#ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_

#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <algorithm>
#include <functional>
#include <vector>

namespace LightGBM {

/*! \brief Static helpers to split a range of work into per-thread blocks. */
class Threading {
 public:
  /*!
   * \brief Compute block count and size for `cnt` items using the current
   *        OpenMP thread count.  Delegates to the explicit-thread overload.
   */
  template <typename INDEX_T>
  static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block,
                               int* out_nblock, INDEX_T* block_size) {
    int num_threads = OMP_NUM_THREADS();
    BlockInfo<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock,
                       block_size);
  }

  /*!
   * \brief Split `cnt` items into at most `num_threads` blocks of at least
   *        `min_cnt_per_block` each; the block size is rounded up via
   *        SIZE_ALIGNED when more than one block is used.
   */
  template <typename INDEX_T>
  static inline void BlockInfo(int num_threads, INDEX_T cnt,
                               INDEX_T min_cnt_per_block, int* out_nblock,
                               INDEX_T* block_size) {
    *out_nblock = std::min<int>(
        num_threads,
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
    if (*out_nblock > 1) {
      *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock));
    } else {
      *block_size = cnt;
    }
  }

  /*!
   * \brief Like BlockInfo, but rounds the block size up to a multiple of
   *        `min_cnt_per_block` instead of SIZE_ALIGNED alignment.
   */
  template <typename INDEX_T>
  static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt,
                                        INDEX_T min_cnt_per_block,
                                        int* out_nblock, INDEX_T* block_size) {
    *out_nblock = std::min<int>(
        num_threads,
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
    if (*out_nblock > 1) {
      *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock);
      // force the block size to the times of min_cnt_per_block
      *block_size = (*block_size + min_cnt_per_block - 1) /
                    min_cnt_per_block * min_cnt_per_block;
    } else {
      *block_size = cnt;
    }
  }

  /*! \brief Convenience overload using the current OpenMP thread count. */
  template <typename INDEX_T>
  static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block,
                                        int* out_nblock, INDEX_T* block_size) {
    int num_threads = OMP_NUM_THREADS();
    BlockInfoForceSize<INDEX_T>(num_threads, cnt, min_cnt_per_block,
                                out_nblock, block_size);
  }

  /*!
   * \brief Run `inner_fun(block_index, inner_start, inner_end)` over
   *        [start, end) in parallel blocks of at least `min_block_size`.
   * \return the number of blocks actually used.
   */
  template <typename INDEX_T>
  static inline int For(
      INDEX_T start, INDEX_T end, INDEX_T min_block_size,
      const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
    int n_block = 1;
    INDEX_T num_inner = end - start;
    BlockInfo<INDEX_T>(num_inner, min_block_size, &n_block, &num_inner);
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < n_block; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T inner_start = start + num_inner * i;
      INDEX_T inner_end = std::min(end, inner_start + num_inner);
      inner_fun(i, inner_start, inner_end);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    return n_block;
  }
};

/*!
 * \brief Parallel stable two-way partition: each thread partitions its own
 *        block into scratch buffers, then the per-block results are
 *        concatenated into `out` (all "left" items first, then all "right").
 *        With TWO_BUFFER=false a single buffer is used and the right part of
 *        each block is stored reversed, then copied back in order.
 */
template <typename INDEX_T, bool TWO_BUFFER>
class ParallelPartitionRunner {
 public:
  ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size)
      : min_block_size_(min_block_size) {
    num_threads_ = OMP_NUM_THREADS();
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
    offsets_.resize(num_threads_);
    left_cnts_.resize(num_threads_);
    right_cnts_.resize(num_threads_);
    left_write_pos_.resize(num_threads_);
    right_write_pos_.resize(num_threads_);
  }

  ~ParallelPartitionRunner() {}

  /*! \brief Resize the scratch buffers for a new data size. */
  void ReSize(INDEX_T num_data) {
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
  }

  /*!
   * \brief Partition `cnt` items.  `func(block, start, cnt, left, right)`
   *        fills the left/right scratch pointers for its block and returns
   *        the number of left items.  The merged result is written to `out`.
   * \return total number of "left" items (start offset of the right part).
   */
  template<bool FORCE_SIZE>
  INDEX_T Run(
      INDEX_T cnt,
      const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T*, INDEX_T*)>& func,
      INDEX_T* out) {
    int nblock = 1;
    INDEX_T inner_size = cnt;
    if (FORCE_SIZE) {
      Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_,
                                             &nblock, &inner_size);
    } else {
      Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_,
                                    &nblock, &inner_size);
    }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T cur_start = i * inner_size;
      INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start);
      offsets_[i] = cur_start;
      if (cur_cnt <= 0) {
        left_cnts_[i] = 0;
        right_cnts_[i] = 0;
        continue;
      }
      auto left_ptr = left_.data() + cur_start;
      INDEX_T* right_ptr = nullptr;
      if (TWO_BUFFER) {
        right_ptr = right_.data() + cur_start;
      }
      // split data inside the block, to reduce the number of func calls
      INDEX_T cur_left_count = func(i, cur_start, cur_cnt, left_ptr, right_ptr);
      if (!TWO_BUFFER) {
        // reverse for one buffer
        std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt);
      }
      left_cnts_[i] = cur_left_count;
      right_cnts_[i] = cur_cnt - cur_left_count;
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // prefix sums give each block's write offset in the merged output
    left_write_pos_[0] = 0;
    right_write_pos_[0] = 0;
    for (int i = 1; i < nblock; ++i) {
      left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1];
      right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1];
    }
    // NOTE(review): declared data_size_t rather than INDEX_T -- confirm
    // this is intended for all instantiations of the template.
    data_size_t left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1];

    auto right_start = out + left_cnt;
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      std::copy_n(left_.data() + offsets_[i], left_cnts_[i],
                  out + left_write_pos_[i]);
      if (TWO_BUFFER) {
        std::copy_n(right_.data() + offsets_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      } else {
        // single-buffer mode: the right part sits after the left part
        // of each block (stored reversed above)
        std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      }
    }
    return left_cnt;
  }

 private:
  int num_threads_;
  INDEX_T min_block_size_;
  std::vector<INDEX_T> left_;   // per-block "left" scratch (also holds right when !TWO_BUFFER)
  std::vector<INDEX_T> right_;  // per-block "right" scratch (TWO_BUFFER only)
  std::vector<INDEX_T> offsets_;
  std::vector<INDEX_T> left_cnts_;
  std::vector<INDEX_T> right_cnts_;
  std::vector<INDEX_T> left_write_pos_;
  std::vector<INDEX_T> right_write_pos_;
};

}  // namespace LightGBM

#endif  // LightGBM_UTILS_THREADING_H_
convolution_sgemm_pack16to1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Performs the sgemm stage of an im2col convolution whose input is packed
// 16 floats per element (pack16) and whose output is unpacked (pack1).
// Step 1 repacks bottom_im2col into tile-major order (tiles of 16, then 8,
// then single columns); step 2 multiplies the repacked tiles by the
// pre-interleaved kernel, 8 output channels at a time.
static void im2col_sgemm_pack16to1_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output positions (outw * outh)
    const int maxk = bottom_im2col.h;   // kernel taps per channel (kernel_w * kernel_h)
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;          // may be null (Mat -> pointer conversion)

    // Scratch buffer holding the tile-reordered im2col data.
    // Channel index for position i is: i/16 + (i%16)/8 + i%8
    // (16-wide tiles first, then one 8-wide tile, then single columns).
    Mat tmp;
    if (size >= 16)
        tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + size % 8, 64u, 16, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + size % 8, 64u, 16, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size >> 4;

        // ---- repack 16-column tiles: transpose each 16x16 block so the 16
        // positions become the fastest-varying dimension ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 16;

            float* tmpptr = tmp.channel(i / 16);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 16x16
                    __m512 _r0 = _mm512_loadu_ps(img0);
                    __m512 _r1 = _mm512_loadu_ps(img0 + 16);
                    __m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
                    __m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
                    __m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
                    __m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
                    __m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
                    __m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
                    __m512 _r8 = _mm512_loadu_ps(img0 + 16 * 8);
                    __m512 _r9 = _mm512_loadu_ps(img0 + 16 * 9);
                    __m512 _ra = _mm512_loadu_ps(img0 + 16 * 10);
                    __m512 _rb = _mm512_loadu_ps(img0 + 16 * 11);
                    __m512 _rc = _mm512_loadu_ps(img0 + 16 * 12);
                    __m512 _rd = _mm512_loadu_ps(img0 + 16 * 13);
                    __m512 _re = _mm512_loadu_ps(img0 + 16 * 14);
                    __m512 _rf = _mm512_loadu_ps(img0 + 16 * 15);

                    transpose16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);

                    _mm512_storeu_ps(tmpptr, _r0);
                    _mm512_storeu_ps(tmpptr + 16, _r1);
                    _mm512_storeu_ps(tmpptr + 16 * 2, _r2);
                    _mm512_storeu_ps(tmpptr + 16 * 3, _r3);
                    _mm512_storeu_ps(tmpptr + 16 * 4, _r4);
                    _mm512_storeu_ps(tmpptr + 16 * 5, _r5);
                    _mm512_storeu_ps(tmpptr + 16 * 6, _r6);
                    _mm512_storeu_ps(tmpptr + 16 * 7, _r7);
                    _mm512_storeu_ps(tmpptr + 16 * 8, _r8);
                    _mm512_storeu_ps(tmpptr + 16 * 9, _r9);
                    _mm512_storeu_ps(tmpptr + 16 * 10, _ra);
                    _mm512_storeu_ps(tmpptr + 16 * 11, _rb);
                    _mm512_storeu_ps(tmpptr + 16 * 12, _rc);
                    _mm512_storeu_ps(tmpptr + 16 * 13, _rd);
                    _mm512_storeu_ps(tmpptr + 16 * 14, _re);
                    _mm512_storeu_ps(tmpptr + 16 * 15, _rf);

                    img0 += size * 16;
                    tmpptr += 256;
                }
            }
        }

        remain_size_start += nn_size << 4;
        nn_size = (size - remain_size_start) >> 3;

        // ---- repack one 8-column tile: transpose 16x8 blocks with
        // unpack/shuffle sequences ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 16x8
                    __m512 _r0 = _mm512_loadu_ps(img0);
                    __m512 _r1 = _mm512_loadu_ps(img0 + 16);
                    __m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
                    __m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
                    __m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
                    __m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
                    __m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
                    __m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);

                    __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
                    __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
                    __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
                    __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
                    __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
                    __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
                    __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
                    __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
                    __m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
                    _tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
                    _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                    _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                    _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
                    _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
                    _r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                    _r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
                    _r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
                    _r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));

                    _mm512_storeu_ps(tmpptr, _r0);
                    _mm512_storeu_ps(tmpptr + 16, _r1);
                    _mm512_storeu_ps(tmpptr + 16 * 2, _r2);
                    _mm512_storeu_ps(tmpptr + 16 * 3, _r3);
                    _mm512_storeu_ps(tmpptr + 16 * 4, _r4);
                    _mm512_storeu_ps(tmpptr + 16 * 5, _r5);
                    _mm512_storeu_ps(tmpptr + 16 * 6, _r6);
                    _mm512_storeu_ps(tmpptr + 16 * 7, _r7);

                    img0 += size * 16;
                    tmpptr += 128;
                }
            }
        }

        remain_size_start += nn_size << 3;

        // ---- repack leftover single columns (no transpose needed) ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

                for (int k = 0; k < maxk; k++)
                {
                    __m512 _val = _mm512_load_ps(img0);
                    _mm512_store_ps(tmpptr, _val);

                    img0 += size * 16;
                    tmpptr += 16;
                }
            }
        }
    }

    int nn_outch = outch / 8;
    int remain_outch_start = nn_outch * 8;

    // ---- main gemm: 8 output channels per iteration ----
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        float* outptr4 = top_blob.channel(p + 4);
        float* outptr5 = top_blob.channel(p + 5);
        float* outptr6 = top_blob.channel(p + 6);
        float* outptr7 = top_blob.channel(p + 7);

        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 16 output positions x 8 channels per inner iteration
        for (; i + 15 < size; i += 16)
        {
            const float* tmpptr = tmp.channel(i / 16);
            const float* kptr = kernel.channel(p / 8);

            int nn = inch * maxk * 16; // inch always > 0

            __m512 _sum0 = _mm512_set1_ps(biasptr[0]);
            __m512 _sum1 = _mm512_set1_ps(biasptr[1]);
            __m512 _sum2 = _mm512_set1_ps(biasptr[2]);
            __m512 _sum3 = _mm512_set1_ps(biasptr[3]);
            __m512 _sum4 = _mm512_set1_ps(biasptr[4]);
            __m512 _sum5 = _mm512_set1_ps(biasptr[5]);
            __m512 _sum6 = _mm512_set1_ps(biasptr[6]);
            __m512 _sum7 = _mm512_set1_ps(biasptr[7]);

            for (int j = 0; j < nn; j++)
            {
                __m512 _val0 = _mm512_load_ps(tmpptr);

                __m512 _w0 = _mm512_set1_ps(kptr[0]);
                __m512 _w1 = _mm512_set1_ps(kptr[1]);
                _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm512_fmadd_ps(_val0, _w1, _sum1);
                __m512 _w2 = _mm512_set1_ps(kptr[2]);
                __m512 _w3 = _mm512_set1_ps(kptr[3]);
                _sum2 = _mm512_fmadd_ps(_val0, _w2, _sum2);
                _sum3 = _mm512_fmadd_ps(_val0, _w3, _sum3);
                __m512 _w4 = _mm512_set1_ps(kptr[4]);
                __m512 _w5 = _mm512_set1_ps(kptr[5]);
                _sum4 = _mm512_fmadd_ps(_val0, _w4, _sum4);
                _sum5 = _mm512_fmadd_ps(_val0, _w5, _sum5);
                __m512 _w6 = _mm512_set1_ps(kptr[6]);
                __m512 _w7 = _mm512_set1_ps(kptr[7]);
                _sum6 = _mm512_fmadd_ps(_val0, _w6, _sum6);
                _sum7 = _mm512_fmadd_ps(_val0, _w7, _sum7);

                tmpptr += 16;
                kptr += 8;
            }

            _mm512_storeu_ps(outptr0, _sum0);
            _mm512_storeu_ps(outptr1, _sum1);
            _mm512_storeu_ps(outptr2, _sum2);
            _mm512_storeu_ps(outptr3, _sum3);
            _mm512_storeu_ps(outptr4, _sum4);
            _mm512_storeu_ps(outptr5, _sum5);
            _mm512_storeu_ps(outptr6, _sum6);
            _mm512_storeu_ps(outptr7, _sum7);

            outptr0 += 16;
            outptr1 += 16;
            outptr2 += 16;
            outptr3 += 16;
            outptr4 += 16;
            outptr5 += 16;
            outptr6 += 16;
            outptr7 += 16;
        }
        // 8 output positions x 8 channels
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
            const float* kptr = kernel.channel(p / 8);

            int nn = inch * maxk * 16; // inch always > 0

            __m256 _sum0 = _mm256_broadcast_ss(biasptr);
            __m256 _sum1 = _mm256_broadcast_ss(biasptr + 1);
            __m256 _sum2 = _mm256_broadcast_ss(biasptr + 2);
            __m256 _sum3 = _mm256_broadcast_ss(biasptr + 3);
            __m256 _sum4 = _mm256_broadcast_ss(biasptr + 4);
            __m256 _sum5 = _mm256_broadcast_ss(biasptr + 5);
            __m256 _sum6 = _mm256_broadcast_ss(biasptr + 6);
            __m256 _sum7 = _mm256_broadcast_ss(biasptr + 7);

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_load_ps(tmpptr);

                __m256 _w0 = _mm256_broadcast_ss(kptr);
                __m256 _w1 = _mm256_broadcast_ss(kptr + 1);
                _sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm256_fmadd_ps(_val0, _w1, _sum1);
                __m256 _w2 = _mm256_broadcast_ss(kptr + 2);
                __m256 _w3 = _mm256_broadcast_ss(kptr + 3);
                _sum2 = _mm256_fmadd_ps(_val0, _w2, _sum2);
                _sum3 = _mm256_fmadd_ps(_val0, _w3, _sum3);
                __m256 _w4 = _mm256_broadcast_ss(kptr + 4);
                __m256 _w5 = _mm256_broadcast_ss(kptr + 5);
                _sum4 = _mm256_fmadd_ps(_val0, _w4, _sum4);
                _sum5 = _mm256_fmadd_ps(_val0, _w5, _sum5);
                __m256 _w6 = _mm256_broadcast_ss(kptr + 6);
                __m256 _w7 = _mm256_broadcast_ss(kptr + 7);
                _sum6 = _mm256_fmadd_ps(_val0, _w6, _sum6);
                _sum7 = _mm256_fmadd_ps(_val0, _w7, _sum7);

                tmpptr += 8;
                kptr += 8;
            }

            _mm256_storeu_ps(outptr0, _sum0);
            _mm256_storeu_ps(outptr1, _sum1);
            _mm256_storeu_ps(outptr2, _sum2);
            _mm256_storeu_ps(outptr3, _sum3);
            _mm256_storeu_ps(outptr4, _sum4);
            _mm256_storeu_ps(outptr5, _sum5);
            _mm256_storeu_ps(outptr6, _sum6);
            _mm256_storeu_ps(outptr7, _sum7);

            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
            outptr4 += 8;
            outptr5 += 8;
            outptr6 += 8;
            outptr7 += 8;
        }
        // single output position x 8 channels: accumulate across the 8
        // channel lanes instead of across positions
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8);
            const float* kptr = kernel.channel(p / 8);

            int nn = inch * maxk * 16; // inch always > 0

            __m256 _sum = _mm256_loadu_ps(biasptr);

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                __m256 _w0 = _mm256_load_ps(kptr);
                _sum = _mm256_fmadd_ps(_val0, _w0, _sum);

                tmpptr += 1;
                kptr += 8;
            }

            float sum[8];
            _mm256_storeu_ps(sum, _sum);

            outptr0[0] = sum[0];
            outptr1[0] = sum[1];
            outptr2[0] = sum[2];
            outptr3[0] = sum[3];
            outptr4[0] = sum[4];
            outptr5[0] = sum[5];
            outptr6[0] = sum[6];
            outptr7[0] = sum[7];

            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
            outptr4 += 1;
            outptr5 += 1;
            outptr6 += 1;
            outptr7 += 1;
        }
    }

    // ---- leftover output channels, one at a time ----
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + 15 < size; i += 16)
        {
            const float* tmpptr = tmp.channel(i / 16);
            const float* kptr = kernel.channel(p / 8 + p % 8);

            int nn = inch * maxk * 16; // inch always > 0

            __m512 _sum0 = _mm512_set1_ps(bias0);

            for (int j = 0; j < nn; j++)
            {
                __m512 _val0 = _mm512_load_ps(tmpptr);
                __m512 _w0 = _mm512_set1_ps(kptr[0]);
                _sum0 = _mm512_fmadd_ps(_w0, _val0, _sum0);

                tmpptr += 16;
                kptr += 1;
            }

            _mm512_storeu_ps(outptr0, _sum0);
            outptr0 += 16;
        }
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
            const float* kptr = kernel.channel(p / 8 + p % 8);

            int nn = inch * maxk * 16; // inch always > 0

            __m256 _sum0 = _mm256_set1_ps(bias0);

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_load_ps(tmpptr);
                __m256 _w0 = _mm256_broadcast_ss(kptr);
                _sum0 = _mm256_fmadd_ps(_w0, _val0, _sum0);

                tmpptr += 8;
                kptr += 1;
            }

            _mm256_storeu_ps(outptr0, _sum0);
            outptr0 += 8;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8);
            const float* kptr = kernel.channel(p / 8 + p % 8);

            int nn = inch * maxk; // inch always > 0

            float sum0 = bias0;

            __m512 _sum0 = _mm512_setzero_ps();

            for (int j = 0; j < nn; j++)
            {
                __m512 _val0 = _mm512_load_ps(tmpptr);
                __m512 _w0 = _mm512_load_ps(kptr);
                _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);

                tmpptr += 16;
                kptr += 16;
            }

            // horizontal add of the 16 partial products
            sum0 += _mm512_comp_reduce_add_ps(_sum0);

            outptr0[0] = sum0;
            outptr0 += 1;
        }
    }
}

// Reorders convolution weights for im2col_sgemm_pack16to1_avx512:
// groups of 8 output channels (with scalar leftovers), 16 input channels,
// interleaved so the gemm can read them sequentially.
static void convolution_im2col_sgemm_transform_kernel_pack16to1_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = pb-pa-maxk-inch/pa-outch/pb
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(8 * 16 * maxk, inch / 16, outch / 8 + outch % 8);

    int q = 0;
    for (; q + 7 < outch; q += 8)
    {
        float* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
    }
    // leftover output channels: one channel per kernel_tm channel
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

        float* g00 = kernel_tm.channel(q / 8 + q % 8);

        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 16; i++)
                {
                    const float* k00 = k0.row(p + i);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}

// Full im2col + sgemm convolution entry point for pack16 input / pack1
// output: builds the im2col matrix (16 floats per sample position) and
// delegates the multiplication to im2col_sgemm_pack16to1_avx512.
static void convolution_im2col_sgemm_pack16to1_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
    {
        // gap skips from the end of one output row to the start of the next
        const int gap = (w * stride_h - outw * stride_w) * 16;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            __m512 _val = _mm512_load_ps(sptr);
                            _mm512_store_ps(ptr, _val);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack16to1_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
zSchCompUdt-cuda.c
/*! @file * \brief This file contains the main loop of pzgstrf which involves * rank k update of the Schur complement. * Uses CUDA GPU. * * <pre> * -- Distributed SuperLU routine (version 4.0) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * */ #define SCHEDULE_STRATEGY dynamic #define cublasCheckErrors(fn) \ do { \ cublasStatus_t __err = fn; \ if (__err != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \ (int)(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while(0); if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */ ldu =0; full =1; int cum_nrow; int temp_nbrow; lptr = lptr0; luptr = luptr0; nbrow= lsub[1]; if (myrow==krow) nbrow = lsub[1]-lsub[3]; if (nbrow>0) { int ncol_max = SUPERLU_MIN(buffer_size/nbrow,bigu_size/ldt); int num_streams_used, /*number of streams that will be used*/ ncpu_blks; /*Number of CPU dgemm blks*/ int jjj, jjj_st,jjj_global; for (j = jj0; j < nub; ++j) { arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); ncols =0 ; //initialize at 0 jj = iukp; int temp_ldu=0; for (; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { ++ncols; } temp_ldu = SUPERLU_MAX(temp_ldu, segsize); } full_u_cols[j] = ncols; blk_ldu[j] = temp_ldu; } /* end for j = jj0..nub */ jjj = jj0; /* initialization */ // #pragma omp barrier while ( jjj < nub ) { jjj_st=jjj; #ifdef _OPENMP #pragma omp single #endif { ldu = blk_ldu[jjj_st]; for (j = jjj_st; j < nub ; ++j) { /* prefix sum */ if (j != jjj_st) full_u_cols[j] += full_u_cols[j-1]; ldu = SUPERLU_MAX(ldu, blk_ldu[j]); /* break condition */ /* the number of columns that can be processed is limited by buffer size*/ if (full_u_cols[j]+((j+1==nub)?0:full_u_cols[j+1]) > ncol_max) { break; } } /* end for j=jjj_st to nub */ jjj_global = SUPERLU_MIN(nub, j+1); /* Maximum value of jjj will be nub */ // TAU_STATIC_TIMER_START("work_divison"); /* 
Divide CPU-GPU gemm here */ gemm_division_cpu_gpu( &num_streams_used, /*number of streams that will be used*/ stream_end_col, /*array holding last column blk for each partition*/ &ncpu_blks, /*Number of CPU gemm blks*/ /*input*/ nbrow, /*number of row in A matrix*/ ldu, /*number of k in dgemm*/ nstreams, full_u_cols + jjj_st, /*array containing prefix sum of work load*/ jjj_global-jjj_st /*Number of work load */ ); // TAU_STATIC_TIMER_STOP("work_divison"); } /* pragma omp single */ jjj = jjj_global; // printf("thread_id %d, jjj %d \n",thread_id,jjj ); if (jjj == jjj_st+1 && full_u_cols[jjj_st] > ncol_max) { printf("allocate more memory for buffer !!!!\n"); if(nbrow * full_u_cols[jjj_st] > buffer_size) printf("%d buffer_size %d\n",nbrow*full_u_cols[jjj_st],buffer_size ); } // #pragma omp barrier /* gathering circuit */ assert(jjj_st<nub); assert(jjj-1<nub); // TAU_STATIC_TIMER_START("GATHER_U"); #ifdef _OPENMP #pragma omp for schedule( SCHEDULE_STRATEGY ) #endif for (j = jjj_st; j < jjj; ++j) { if (j==jjj_st) tempu = bigU; else tempu = bigU + ldu*full_u_cols[j-1]; /* == processing each of the remaining columns == */ arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid); // tempu = tempU2d; for (jj = iukp; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { lead_zero = ldu - segsize; for (i = 0; i < lead_zero; ++i) tempu[i] = zero; tempu += lead_zero; for (i = 0; i < segsize; ++i) tempu[i] = uval[rukp+i]; rukp += segsize; tempu += segsize; } } rukp -= usub[iukp - 1]; /* Return to start of U(k,j). 
*/ } /* end for j=jjj_st to jjj */ if ( num_streams_used > 0 ) { #ifdef PI_DEBUG printf("nbrow %d *ldu %d =%d < ldt %d * max_row_size %d =%d \n",nbrow,ldu,nbrow*ldu,ldt,max_row_size,ldt*max_row_size ); assert(nbrow*ldu<=ldt*max_row_size); #endif cudaMemcpy2DAsync(dA, nbrow*sizeof(doublecomplex), &lusup[luptr+(knsupc-ldu)*nsupr], nsupr*sizeof(doublecomplex), nbrow*sizeof(doublecomplex), ldu, cudaMemcpyHostToDevice, streams[0]); } for (int i = 0; i < num_streams_used; ++i) { int st = (i==0) ? ncpu_blks+jjj_st : jjj_st+stream_end_col[i-1]; int st_col = full_u_cols[st-1]; int num_col_stream = full_u_cols[jjj_st+stream_end_col[i]-1]-full_u_cols[st-1]; tempu = bigU; doublecomplex *tempv1 = bigV + full_u_cols[st-1]*nbrow; /* Following is for testing purpose */ #ifdef GPU_ACC int stream_id = i; int b_offset = ldu * st_col; int c_offset = st_col * nbrow; size_t B_stream_size = ldu * num_col_stream * sizeof(doublecomplex); size_t C_stream_size = nbrow * num_col_stream * sizeof(doublecomplex); assert(ldu*(st_col+num_col_stream) < bigu_size); assert(nbrow*(st_col+num_col_stream) < buffer_size); cudaMemcpyAsync(dB+b_offset, tempu+b_offset, B_stream_size, cudaMemcpyHostToDevice, streams[stream_id]); cublasCheckErrors( cublasSetStream(handle[stream_id], streams[stream_id]) ); cublasCheckErrors( cublasZgemm(handle[stream_id], CUBLAS_OP_N, CUBLAS_OP_N, nbrow, num_col_stream, ldu, (const cuDoubleComplex*) &alpha, (const cuDoubleComplex*) dA, nbrow, (const cuDoubleComplex*) &dB[b_offset], ldu, (const cuDoubleComplex*) &beta, (cuDoubleComplex*)&dC[c_offset], nbrow) ); checkCuda( cudaMemcpyAsync(tempv1, dC+c_offset, C_stream_size, cudaMemcpyDeviceToHost, streams[stream_id]) ); #else if ( num_col_stream > 0 ) { my_zgemm_("N", "N", &nbrow, &num_col_stream, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv1, &nbrow, 1, 1); } #endif } /* end for i = 1 to num_streams used */ int num_col = full_u_cols[jjj_st+ncpu_blks-1]; int st_col = 0; /*special 
case for cpu */ tempv = bigV + nbrow * st_col; tempu = bigU; double tstart = SuperLU_timer_(); #if defined (USE_VENDOR_BLAS) zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow, 1, 1); #else zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow); #endif gemm_timer += SuperLU_timer_() -tstart; stat->ops[FACT] += 2 * nbrow * ldu * full_u_cols[jjj-1]; // printf("after zgemm \n"); /* Now scattering blocks handled by cpu */ int temp_ncol; /* scatter first blocks which cpu has computated*/ tstart = SuperLU_timer_(); #ifdef _OPENMP #pragma omp parallel \ private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \ segsize,lead_zero, \ ib, temp_nbrow,ilst,lib,index, \ ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \ nzval, lb , jj, i) \ firstprivate(luptr,lptr) default (shared) #endif { int thread_id = omp_get_thread_num(); int* indirect_thread = indirect + ldt*thread_id; int* indirect2_thread = indirect2 + ldt*thread_id; doublecomplex* tempv1; if (ncpu_blks< omp_get_num_threads()) { // TAU_STATIC_TIMER_START("SPECIAL_CPU_SCATTER"); for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; #ifdef _OPENMP #pragma omp for schedule( SCHEDULE_STRATEGY ) nowait #endif for (lb = 0; lb < nlb; lb++ ) { int cum_nrow = 0; int temp_nbrow; lptr = lptr0; luptr = luptr0; for (int i = 0; i < lb; ++i) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ lptr += LB_DESCRIPTOR; /* Skip descriptor. 
*/ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow +=temp_nbrow; } ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. */ /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... */ // TAU_STATIC_TIMER_STOP("SPECIAL_CPU_SCATTER"); } else { #ifdef _OPENMP #pragma omp for schedule(SCHEDULE_STRATEGY) nowait #endif for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; for (lb = 0; lb < nlb; lb++ ) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. 
*/ #ifdef DGEMM_STAT if(j==jjj_st) { temp_ncol = full_u_cols[j]; } else { temp_ncol = full_u_cols[j]- full_u_cols[j-1]; } printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu); #endif /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... */ } /* else if (ncpu_blks >= omp_get_num_threads()) */ } /* parallel region */ scatter_timer += SuperLU_timer_() - tstart; #ifdef _OPENMP #pragma omp parallel \ private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \ segsize,lead_zero, \ ib, temp_nbrow,ilst,lib,index, \ ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \ nzval, lb , jj, i) \ firstprivate(luptr,lptr) default (shared) #endif { int thread_id = omp_get_thread_num(); int* indirect_thread = indirect + ldt*thread_id; int* indirect2_thread = indirect2 + ldt*thread_id; doublecomplex* tempv1; for(i = 0; i < num_streams_used; i++) { /* i is private variable */ checkCuda(cudaStreamSynchronize (streams[i])); int jjj_st1 = (i==0) ? 
jjj_st + ncpu_blks : jjj_st + stream_end_col[i-1]; int jjj_end = jjj_st + stream_end_col[i]; assert(jjj_end-1<nub); assert(jjj_st1>jjj_st) ; /* now scatter it */ #pragma omp for schedule( SCHEDULE_STRATEGY ) nowait for (j = jjj_st1; j < jjj_end; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; for (lb = 0; lb < nlb; lb++) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. */ #ifdef DGEMM_STAT if(j==jjj_st) { temp_ncol = full_u_cols[j]; } else { temp_ncol = full_u_cols[j]- full_u_cols[j-1]; } printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu); #endif /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("gpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("gpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... 
*/ } /* end for i = 0 to nstreams */ // TAU_STATIC_TIMER_STOP("GPU_SCATTER"); // TAU_STATIC_TIMER_STOP("INSIDE_OMP"); } /* end pragma omp parallel */ // TAU_STATIC_TIMER_STOP("OUTSIDE_OMP"); } /* end while(jjj<nub) */ } /* if nbrow>0 */ } /* if msg1 and msg 2 */
mkl_quantized_conv_ops.h
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_ #define TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_ #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/tensor.h" #ifdef INTEL_MKL namespace tensorflow { template <class T> float MklFloatForOneQuantizedLevel(float range_min, float range_max) { int64 highest = static_cast<int64_t>(Eigen::NumTraits<T>::highest()); int64 lowest = static_cast<int64_t>(Eigen::NumTraits<T>::lowest()); // Adjusting for having a symmetric range. // for example: for 8-bit [-127, 127] as opposed to [-128, 127]. 
if (lowest < -highest) ++lowest; const float float_for_one_quantized_level = (range_max - range_min) / (highest - lowest); return float_for_one_quantized_level; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, float min_b, float max_b, float* min_c, float* max_c) { const float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); const float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b, max_b); const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest()); const float c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; *min_c = c_float_for_one_quant_level * c_lowest; *max_c = c_float_for_one_quant_level * c_highest; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, const Tensor& min_b_vector, const Tensor& max_b_vector, Tensor** min_c_vector, Tensor** max_c_vector) { DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements()); DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements()); size_t n_channel = min_b_vector.NumElements(); const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest()); const float* min_b = min_b_vector.flat<float>().data(); const float* max_b = max_b_vector.flat<float>().data(); float* min_c = (*min_c_vector)->flat<float>().data(); float* max_c = (*max_c_vector)->flat<float>().data(); #ifdef ENABLE_ONEDNN_OPENMP #pragma omp parallel for #endif // ENABLE_ONEDNN_OPENMP // TODO: Add eigen parallel_for for (int64_t n = 0; n < n_channel; ++n) { float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]); float 
c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; min_c[n] = c_float_for_one_quant_level * c_lowest; max_c[n] = c_float_for_one_quant_level * c_highest; } } } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
pr61200.c
/* PR libgomp/61200 */

/* Regression test for libgomp PR 61200.  Checks that a parallel region
   observes var == 1, then creates a task that writes var through a
   shared() clause.  */

int
main ()
{
  int var = 1;
  /* Every thread in the parallel region must still see the initial value.  */
  #pragma omp parallel
  if (var != 1)
    __builtin_abort ();
  /* Task writes the shared variable.  NOTE(review): there is no explicit
     taskwait before return — presumably the scenario the PR exercises;
     confirm against the upstream libgomp testsuite version.  */
  #pragma omp task shared(var)
  var = 2;
  return 0;
}
GB_binop__lxor_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any functional change belongs in the Generator template, not
// in this instantiation for the LXOR/uint16 operator.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__lxor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__lxor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_uint16)
// A*D function (colscale):         GB (_AxD__lxor_uint16)
// D*A function (rowscale):         GB (_DxB__lxor_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_uint16)
// C=scalar+B                       GB (_bind1st__lxor_uint16)
// C=scalar+B'                      GB (_bind1st_tran__lxor_uint16)
// C=A+scalar                       GB (_bind2nd__lxor_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_uint16)

// C type:     uint16_t
// A type:     uint16_t
// A pattern?  0
// B type:     uint16_t
// B pattern?  0

// BinaryOp:   cij = ((aij != 0) != (bij != 0))

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_UINT16 || GxB_NO_LXOR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR is not in that list, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion (values for entries present in
    // just one of A or B)
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LXOR is commutative, so GB_BINOP_FLIP is 0 here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wf3cte.c
/* WFC3 -- CTE loss correction for UVIS

   M. sosey  Aug-2014  Adapted for the pipeline from Jay Andersons CTE correction code for wfc3 UVIS
   raw2raz_wfc3uv.F , an edited file was delivered december 2014, and both are different from the
   fortran code currently served on the wfc3 website.

   M. Sosey Aug-2016 Adapted to be used with Subarrays as well as full frame arrays,
   as long as the subarray contains physical overscan pixels, which don't include the science team subarrays
   which can span quads.

   M.D. De La Pena Dec-2019: This routine has been significantly upgraded by Jay Anderson (JA) and
   delivered in November 2019.  As JA is the important resource for this algorithm, I have only cleaned
   up the comments in his original delivered version, fixed up brace placement, and created defines for
   some of the hard-coded values.  Minimal changes were done explicitly to keep the code in a form
   familiar to JA for possible future modifications.

   M.D. De La Pena Mar-2020: Further changes to accommodate subarrays - only evaluate valid (non-zero)
   pixels.  Updates received from Jay Anderson.  Removed deprecated routines: find_dadj, rsz2rsc,
   inverse_cte_blur, and raz2rsz. Small bug found in original subarray code during testing.

   M.D. De La Pena Apr-2021: Fix to address a problem detected when processing a Tungsten flat with a
   high background.  Uninitialized values were used for further computation causing an eventual exception.
*/

# include <time.h>
# include <string.h>
# include <math.h>
# include <stdlib.h>
# include <stdio.h>
# include <float.h>

# ifdef _OPENMP
# include <omp.h>
# endif

# include "hstcal.h"
# include "hstio.h"
# include "wf3.h"
# include "wf3info.h"
# include "hstcalerr.h"
# include "wf3corr.h"
# include "cte.h"
# include "trlbuf.h"

/* These are defined in wf3.h.
   NAMPS    4
   RAZ_COLS 8412
   RAZ_ROWS 2070
*/
# define NITMAX 299        /* Maximum number of iterations */
# define P_OVRSCN 25       /* Physical overscan */
# define V_OVRSCNX2 60     /* Virtual overscan x 2 */
# define XAMP_SCI_DIM 2048 /* X dimension of each AMP of science pixels */
# define YAMP_SCI_DIM 2051 /* Y dimension of each AMP of science pixels */
# define WsMAX 999         /* Maximum number of traps */

/* Used in find_raz2rnoival */
# define SPREAD_FOR_HISTO 4.5
# define LOW_CLIP 3.75
# define HIGH_CLIP 9.75
# define NUM_BINS 1001

/* Forward declarations for the JA CTE kernel and the on-the-fly read-noise
   estimator defined later in this file. */
int sub_ctecor_v2c(float *, float *, int, double *, double *, float *, float *, float, float, int, int, float *);
float find_raz2rnoival(float *, float *, float *);

int WF3cte (char *input, char *output, CCD_Switch *cte_sw,
        RefFileInfo *refnames, int printtime, int verbose, int onecpu) {

    /*
    input: filename
    output: filename
    cte_sw: the calibration flags
    refnames: the names of the calibration reference files
    onecpu: use parallel processing?

    The following are new primary header keywords which will be added to the data
    so that they can be updated by the code. They are also specified in the PCTETAB
    reference file.

    These are taken from the PCTETAB
    CTE_NAME - name of cte algorithm
    CTE_VER - version number of cte algorithm
    CTEDATE0 - date of wfc3/uvis installation in HST, in MJD
    CTEDATE1 - reference date of CTE model pinning, in MJD

    PCTETLEN - max length of CTE trail
    PCTESMIT - number of iterations used in CTE forward modeling
    PCTESHFT - number of iterations used in the parallel transfer
    PCTENSMD - readnoise mitigation algorithm
    PCTETRSH - over-subtraction threshold
    PCTEFRAC - cte scaling frac calculated from expstart
    PCTERNOI - the readnoise clipping level to use

    ***NOTE: This value is no longer used from the PCTETAB.  If PCTERNOI keyword
    value in the raw science image header is non-zero, it will be used for the
    CTE computations.  Otherwise, the value is computed on-the-fly based upon
    the raw image data. (March 2020)

    #These are taken from getreffiles.c
    DRKCFILE is a new dark reference file used only in the CTE branch *_DRC.fits
    BIACFILE is a new super-bias reference file used only in the CTE branch *_BIC.fits
    PCTETAB is a new reference file FITS table which will contain the software parameter switches for the CTE correction *_CTE.fit

    This is the main workhorse function for removing the CTE from WFC3 UVIS images

    Unfortunately this happens before anything else in wfc3, so there's a lot of reading files
    at the beginning in order to populate needed information. The rest of the pipeline works
    on one chip at a time and the structures are all defined to support that. None of these
    structures are defined until the code enters the single chip loops. This differs from the
    CTE correction in ACS which occurs later in the process after basic structures are defined.
*/

    extern int status;

    WF3Info wf3; /*structure with calibration switches and reference files for passing*/
    Hdr phdr; /*primary header for input image, all output information saved here*/
    Hdr scihdr; /*science header in case of subarray image to detect chip*/
    IODescPtr ip = NULL;

    CTEParams cte_pars; /*STRUCTURE HOLDING THE MODEL PARAMETERS*/
    SingleGroup cd; /*SCI 1, chip 2*/
    SingleGroup ab; /*SCI 2, chip 1*/
    SingleGroup subcd; /*subarray chip*/
    SingleGroup subab; /*subarray chip*/
    SingleGroup raz; /* THE LARGE FORMAT COMBINATION OF CDAB*/
    SingleGroup rsz; /* LARGE FORMAT READNOISE CORRECTED IMAGE */
    SingleGroup rsc; /* CTE CORRECTED*/
    SingleGroup rzc; /* FINAL CTE CORRECTED IMAGE */
    SingleGroup chg; /* THE CHANGE DUE TO CTE  */
    SingleGroup raw; /* THE RAW IMAGE IN RAZ FORMAT */

    int i,j; /*loop vars*/
    int max_threads=1;
    clock_t begin;
    double  time_spent;
    float hardset=0.0;

    /* These are used to find subarrays with physical overscan */
    int sci_bin[2];     /* bin size of science image */
    int sci_corner[2];  /* science image corner location */
    int ref_bin[2];
    int ref_corner[2];
    int rsize = 1;      /* reference pixel size */
    int start=0;        /*where the subarray starts*/
    int finish=0;       /*where the subarray ends*/

    /* init header vars */
    initHdr(&phdr);
    initHdr(&scihdr);

    float readNoise = 0.0;
    /* NOTE(review): ret receives the sub_ctecor_v2c status below but is never
       checked — confirm whether a failure path should be handled. */
    int ret;

    /*check if this is a subarray image.
      This is necessary because the CTE routine will start with the raw images
      from scratch and read them in so that both chips can be used. CTE is
      outside of the normal processing where one chip goes through the pipeline
      at a time, both chips are used at the same time for the correction.

      For the case of subarrays, a fake second chip needs to be created.
      The subarray is also placed inside the confines of a full size image
      and a mask is created to ignore pixels not associated with the original
      data during the cte correction. This is necessary because the pixel location
      itself is used as part of the correction. A secondary option would be to set
      the looping arrays to variable sizes and make sure all array references were
      consistent with the current data being processed. I decided on masking which
      might allow for other considerations in future updates.

      Only subarrays which were taken with physical overscan pixels are currently valid
      This distinction can be made with the CRDS ruleset for PCTECORR but it should also
      be checked here incase users update the header themselves for local runs.

      In order to check for overscan pixels I'm using the array start location instead
      of the APERTURE keyword information (there are known user apertures which do not
      have overscan pixels, but this gets around string comparisons and any future
      name changes or aperture additions in the future)
     */
    begin = (double)clock();

    /*CONTAIN PARALLEL PROCESSING TO A SINGLE THREAD AS USER OPTION*/
#   ifdef _OPENMP
    trlmessage("Using parallel processing provided by OpenMP inside CTE routine");
    if (onecpu){
        omp_set_dynamic(0);
        max_threads=1;
        sprintf(MsgText,"onecpu == TRUE, Using only %i threads/cpu", max_threads);
    } else {
        omp_set_dynamic(0);
        max_threads = omp_get_num_procs(); /*be nice, use 1 less than avail?*/
        sprintf(MsgText,"Setting max threads to %i of %i cpus",max_threads, omp_get_num_procs());
    }
    omp_set_num_threads(max_threads);
    trlmessage(MsgText);
#   endif

    /* COPY COMMAND-LINE ARGUMENTS INTO WF3. */
    WF3Init (&wf3); /*sets default information*/
    strcpy (wf3.input, input);
    strcpy (wf3.output, output);

    PrBegin ("WFC3CTE");
    if (wf3.printtime)
        TimeStamp("WFC3CTE Started: ",wf3.rootname);

    /* CHECK WHETHER THE OUTPUT FILE ALREADY EXISTS. */
    if (FileExists (wf3.output)){
        WhichError(status);
        return (ERROR_RETURN);
    }

    wf3.pctecorr = cte_sw->pctecorr;
    wf3.darkcorr = cte_sw->darkcorr;
    wf3.biascorr = cte_sw->biascorr;
    wf3.blevcorr = cte_sw->blevcorr;
    wf3.printtime = printtime;
    wf3.verbose = verbose;
    wf3.refnames = refnames;

    PrFileName ("input", wf3.input);
    PrFileName ("output", wf3.output);

    /* CTE must run on raw data: refuse inputs that are already calibrated. */
    if (wf3.biascorr == COMPLETE){
        trlmessage("BIASCORR complete for input image, CTE can't be performed");
        return(ERROR_RETURN);
    }
    if (wf3.darkcorr == COMPLETE){
        trlmessage("DARKCORR complete for input image, CTE can't be performed");
        return(ERROR_RETURN);
    }
    if (wf3.blevcorr == COMPLETE){
        trlmessage("BLEVCORR complete for input image, CTE can't be performed");
        return(ERROR_RETURN);
    }

    /* DETERMINE THE NAMES OF THE TRAILER FILES BASED ON THE INPUT
       AND OUTPUT FILE NAMES, THEN INITIALIZE THE TRAILER FILE BUFFER
       WITH THOSE NAMES. */
    if (initCTETrl (input, output))
        return (status);

    /* OPEN INPUT IMAGE IN ORDER TO READ ITS PRIMARY HEADER. */
    if (LoadHdr (wf3.input, &phdr) ){
        WhichError(status);
        return (ERROR_RETURN);
    }

    /* GET KEYWORD VALUES FROM PRIMARY HEADER. */
    if (GetKeys (&wf3, &phdr)) {
        freeHdr (&phdr);
        return (status);
    }

    if (GetCTEFlags (&wf3, &phdr)) {
        freeHdr(&phdr);
        return (status);
    }

    /*SET UP THE ARRAYS WHICH WILL BE PASSED AROUND*/
    initSingleGroup(&raz);
    allocSingleGroup(&raz, RAZ_COLS, RAZ_ROWS, True);

    initSingleGroup(&rsz);
    allocSingleGroup(&rsz, RAZ_COLS, RAZ_ROWS, True);

    initSingleGroup(&rsc);
    allocSingleGroup(&rsc, RAZ_COLS, RAZ_ROWS, True);

    initSingleGroup(&rzc);
    allocSingleGroup(&rzc, RAZ_COLS, RAZ_ROWS, True);

    initSingleGroup(&raw);
    allocSingleGroup(&raw, RAZ_COLS, RAZ_ROWS, True);

    initSingleGroup(&chg);
    allocSingleGroup(&chg, RAZ_COLS, RAZ_ROWS, True);

    /*hardset the science arrays*/
    for (i=0;i<RAZ_COLS;i++){
        for(j=0;j<RAZ_ROWS;j++){
            Pix(raw.sci.data,i,j)=hardset;
            Pix(raz.sci.data,i,j)=hardset;
            Pix(rsz.sci.data,i,j)=hardset;
            Pix(rsc.sci.data,i,j)=hardset;
            Pix(rzc.sci.data,i,j)=hardset;
            Pix(chg.sci.data,i,j)=hardset;
        }
    }

    /*READ IN THE CTE PARAMETER TABLE*/
    initCTEParams(&cte_pars);
    if (GetCTEPars (wf3.pctetab.name, &cte_pars))
        return (status);

    if (verbose){
        PrRefInfo ("pctetab", wf3.pctetab.name, wf3.pctetab.pedigree,
                wf3.pctetab.descrip, wf3.pctetab.descrip2);
    }

    /* Full frame and subarrays always have group 1
       If it's a subarray, the group can be from either chip
       and will still be labled group 1 because it's the FIRST
       and only group, so look at the ccdchip instead.

       amps ab are in chip1, sci,2
       amps cd are in chip2, sci,1
    */
    if (wf3.subarray) {
        /* OPEN INPUT IMAGE IN ORDER TO READ ITS SCIENCE HEADER. */
        ip = openInputImage (wf3.input, "SCI", 1);
        if (hstio_err()) {
            sprintf (MsgText, "Image: \"%s\" is not present", wf3.input);
            trlerror (MsgText);
            return (status = OPEN_FAILED);
        }
        getHeader (ip, &scihdr);
        if (ip != NULL)
            closeImage (ip);

        /* Get CCD-specific parameters. */
        if (GetKeyInt (&scihdr, "CCDCHIP", USE_DEFAULT, 1, &wf3.chip)){
            freeHdr(&scihdr);
            return (status);
        }
        freeHdr(&scihdr);

        if (wf3.chip == 2){ /*sci1,cd*/
            start=0;
            finish=0;
            /*get CD subarray from first extension*/
            initSingleGroup (&subcd);
            getSingleGroup (wf3.input, 1, &subcd);
            if (hstio_err()){
                freeSingleGroup(&subcd);
                return (status = OPEN_FAILED);
            }

            /*create an empty full size chip for pasting*/
            initSingleGroup(&cd);
            allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True);
            cd.group_num=1;
            CreateEmptyChip(&wf3, &cd);

            if (GetCorner(&subcd.sci.hdr, rsize, sci_bin, sci_corner))
                return (status);
            if (GetCorner(&cd.sci.hdr, rsize, ref_bin, ref_corner))
                return (status);

            /* Reject subarrays that lie entirely inside the science area,
               i.e. have no physical overscan to anchor the bias. */
            start = sci_corner[0] - ref_corner[0];
            finish = start + subcd.sci.data.nx;
            if ( start >= P_OVRSCN && finish + V_OVRSCNX2 <= (RAZ_COLS/2) - P_OVRSCN){
                sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish);
                trlmessage(MsgText);
                return(ERROR_RETURN);
            }
            /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
              AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO
              THE CTE CODE VARIABLES. */
            if (CompareCTEParams(&subcd, &cte_pars))
                return (status);

            /*Put the subarray data into full frame*/
            Sub2Full(&wf3, &subcd, &cd, 0, 1, 1);

            /* now create an empty chip 1*/
            initSingleGroup(&ab);
            allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True);
            ab.group_num=2;
            CreateEmptyChip(&wf3, &ab);

            /* SAVE A COPY OF THE RAW IMAGE BEFORE BIAS FOR LATER */
            makeRAZ(&cd,&ab,&raw);

            /* Subtract the BIAC file from the subarray before continuing
               The bias routine will take care of cutting out the correct
               image location for the subarray.*/
            if (doCteBias(&wf3,&subcd)){
                freeSingleGroup(&subcd);
                return(status);
            }

            /*reset the array after bias subtraction*/
            Sub2Full(&wf3, &subcd, &cd, 0, 1, 1);

        } else { /*chip is 1, ab, sci2*/
            start=0;
            finish=0;
            initSingleGroup(&subab);
            getSingleGroup(wf3.input, 1, &subab);
            if (hstio_err()){
                freeSingleGroup(&subab);
                return (status = OPEN_FAILED);
            }

            /*make an empty fullsize chip for pasting*/
            initSingleGroup(&ab);
            allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True);
            ab.group_num=2;
            CreateEmptyChip(&wf3, &ab);

            if ( GetCorner(&subab.sci.hdr, rsize, sci_bin, sci_corner))
                return (status);
            if ( GetCorner(&ab.sci.hdr, rsize, ref_bin, ref_corner))
                return (status);

            /* Same physical-overscan requirement as the chip-2 branch. */
            start = sci_corner[0] - ref_corner[0];
            finish = start + subab.sci.data.nx;
            if ( start >= P_OVRSCN && finish + V_OVRSCNX2 <= (RAZ_COLS/2) - P_OVRSCN){
                sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish);
                trlmessage(MsgText);
                return(ERROR_RETURN);
            }
            /*add subarray to full frame image*/
            Sub2Full(&wf3, &subab, &ab, 0, 1, 1);

            /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
              AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO
              THE CTE CODE VARIABLES. */
            if (CompareCTEParams(&subab, &cte_pars))
                return (status);

            /* now create an empty chip 2*/
            initSingleGroup(&cd);
            allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True);
            cd.group_num=1;
            CreateEmptyChip(&wf3, &cd);

            /* SAVE A COPY OF THE RAW IMAGE FOR LATER */
            makeRAZ(&cd,&ab,&raw);

            /* Subtract the BIAC file from the subarray before continuing*/
            subab.group_num=2;
            if (doCteBias(&wf3,&subab)){
                freeSingleGroup(&subab);
                return(status);
            }

            /*reset the array after bias subtraction*/
            Sub2Full(&wf3, &subab, &ab, 0, 1, 1);
        }

    } else {
        /* Full frame image, just read in the groups
           and init the mask to use all pixels */
        initSingleGroup (&cd);
        getSingleGroup (wf3.input, 1, &cd);
        if (hstio_err()){
            return (status = OPEN_FAILED);
        }
        initSingleGroup (&ab);
        getSingleGroup (wf3.input, 2, &ab);
        if (hstio_err()){
            return (status = OPEN_FAILED);
        }
        /*setup the mask*/
        for(i=0; i< ab.dq.data.nx; i++){
            for(j=0; j< ab.dq.data.ny; j++){
                PPix(&ab.dq.data, i, j) = 1;
                PPix(&cd.dq.data, i, j) = 1;
            }
        }

        /* SAVE A COPY OF THE RAW IMAGE FOR LATER */
        makeRAZ(&cd,&ab,&raw);

        /***SUBTRACT THE CTE BIAS FROM BOTH CHIPS IN PLACE***/
        if (doCteBias(&wf3,&cd)){
            freeSingleGroup(&cd);
            return(status);
        }

        if (doCteBias(&wf3,&ab)){
            freeSingleGroup(&ab);
            return(status);
        }
        /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
          AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO
          THE CTE CODE VARIABLES. */
        if (CompareCTEParams(&cd, &cte_pars))
            return (status);
    }

    /*CONVERT TO RAZ, SUBTRACT BIAS AND CORRECT FOR GAIN*/
    if (raw2raz(&wf3, &cd, &ab, &raz))
        return (status);

    /* fff holds the time/row-dependent CTE scaling for every RAZ pixel. */
    SingleGroup fff;
    initSingleGroup(&fff);
    allocSingleGroup(&fff, RAZ_COLS, RAZ_ROWS, True);
    double cte_ff;
    /* Linear scaling of the pinned CTE model with observation date. */
    cte_ff= (wf3.expstart - cte_pars.cte_date0)/ (cte_pars.cte_date1 - cte_pars.cte_date0);
    printf("CTE_FF: %8.3f \n",cte_ff);
    cte_pars.scale_frac=cte_ff;
    for(i=0;i<RAZ_COLS;i++) {
        for(j=0;j<RAZ_ROWS;j++) {
            /* NOTE(review): row index j runs to RAZ_ROWS (2070) but is scaled
               by XAMP_SCI_DIM (2048); this matches JA's reference code —
               confirm the >1 scaling on overscan rows is intentional. */
            Pix(fff.sci.data,i,j) =  cte_ff * (j+1)/((double)XAMP_SCI_DIM);
        }
    }

    /*
     * If the PCTERNOI value from the primary header of the science image is non-zero, it is
     * used in the CTE algorithm.  Otherwise the read noise must be computed via find_raz2rnoival.
     * FLOAT_RNOIVAL and FLOAT_BKGDVAL are designed to be for diagnostic purposes only.
     */
    float FLOAT_RNOIVAL = 0.;
    float FLOAT_BKGDVAL = 0.;
    readNoise = wf3.pcternoi;
    sprintf(MsgText, "PCTERNOI: %8.4f (source: primary header of science image)\n\n", readNoise);
    trlmessage(MsgText);

    /* Comparison should be OK - read from FITS header and no computation */
    if (readNoise == 0.0) {
        readNoise = find_raz2rnoival(raz.sci.data.data, &FLOAT_RNOIVAL, &FLOAT_BKGDVAL);
        sprintf(MsgText, "RNOIVAL: %8.4f  BKGDVAL: %8.4f\n", FLOAT_RNOIVAL, FLOAT_BKGDVAL);
        trlmessage(MsgText);
        sprintf(MsgText, "PCTERNOI: %8.4f (source: computed on-the-fly from science image)", readNoise);
        trlmessage(MsgText);
        sprintf(MsgText, "This computed value supersedes any value obtained from the primary\nheader of the science image.\n\n");
        trlmessage(MsgText);
    }
    /* The PCTERNOI value actually used is written to the PCTERNOI keyword in
     * the output image header when it is updated below for a final time.
     */

    /* Invoke the updated CTE correction which does the read noise
       mitigation in each of the three forward-model iterations.
    */
    trlmessage("CTE: jumping into the routine...");
    ret = sub_ctecor_v2c(raz.sci.data.data, fff.sci.data.data,
                         WsMAX,
                         cte_pars.qlevq_data, cte_pars.dpdew_data,
                         cte_pars.rprof->data.data, cte_pars.cprof->data.data,
                         readNoise, cte_pars.thresh,
                         cte_pars.n_forward, cte_pars.n_par,
                         rzc.sci.data.data);
    trlmessage("CTE: returning from the routine...");

    /* chg = CTE-induced change (back in DN); add it onto the saved raw
       image so the output is raw + correction. */
    for (i=0;i<RAZ_COLS;i++){
        for (j=0;j<RAZ_ROWS;j++){
            Pix(chg.sci.data,i,j) = (Pix(rzc.sci.data,i,j) - Pix(raz.sci.data,i,j))/wf3.ccdgain;
            Pix(rzc.sci.data,i,j) = Pix(raw.sci.data,i,j) + Pix(chg.sci.data,i,j);
        }
    }

    freeSingleGroup(&fff);

    /*BACK TO NORMAL FORMATTING*/
    /*Copies rzc data to cd->sci.data and ab->sci.data */
    undoRAZ(&cd,&ab,&rzc);

    /* COPY BACK THE SCIENCE SUBARRAYS AND
       SAVE THE NEW RAW FILE WITH UPDATED SCIENCE
       ARRAYS AND PRIMARY HEADER TO RAC
    */
    if (wf3.subarray) {
        if (wf3.chip == 2) {
            /*** SAVE USEFUL HEADER INFORMATION ***/
            if (cteHistory (&wf3, subcd.globalhdr))
                return (status);

            /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
            PutKeyDbl(subcd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
            trlmessage("PCTEFRAC saved to header");
            PutKeyFlt(subcd.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
            trlmessage("PCTERNOI saved to header");

            Full2Sub(&wf3, &subcd, &cd, 0, 1, 1);
            putSingleGroup(output, 1, &subcd,0);
            freeSingleGroup(&subcd);
        } else {
            /*** SAVE USEFUL HEADER INFORMATION ***/
            if (cteHistory (&wf3, subab.globalhdr))
                return (status);

            /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
            PutKeyDbl(subab.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
            trlmessage("PCTEFRAC saved to header");
            PutKeyFlt(subab.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
            trlmessage("PCTERNOI saved to header");

            Full2Sub(&wf3, &subab, &ab, 0, 1, 1);
            putSingleGroup(output, 1, &subab,0);
            freeSingleGroup(&subab);
        }

    } else { /*FUll FRAME*/
        /*** SAVE USEFUL HEADER INFORMATION ***/
        if (cteHistory (&wf3, cd.globalhdr))
            return (status);

        /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
        PutKeyDbl(cd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
        trlmessage("PCTEFRAC saved to header");
        PutKeyFlt(cd.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
        trlmessage("PCTERNOI saved to header");

        putSingleGroup(output,cd.group_num, &cd,0);
        putSingleGroup(output,ab.group_num, &ab,0);
    }

    /** CLEAN UP ON AISLE 3 **/
    freeSingleGroup(&rzc);
    freeSingleGroup(&rsc);
    freeSingleGroup(&chg);
    freeSingleGroup(&raz);
    freeSingleGroup(&rsz);
    freeSingleGroup(&raw);
    freeSingleGroup(&cd);
    freeSingleGroup(&ab);

    time_spent = ((double) clock()- begin +0.0) / CLOCKS_PER_SEC;
    if (verbose){
        sprintf(MsgText,"CTE run time: %.2f(s) with %i procs/threads\n",time_spent/max_threads,max_threads);
        trlmessage(MsgText);
    }

    PrSwitch("pctecorr", COMPLETE);
    if(wf3.printtime)
        TimeStamp("PCTECORR Finished",wf3.rootname);
    return (status);
}

/********************* SUPPORTING SUBROUTINES *****************************/

int raw2raz(WF3Info *wf3, SingleGroup *cd, SingleGroup *ab, SingleGroup *raz){
    /*

       convert a raw file to raz file: CDAB longwise amps, save data array
       for comparison with what jay has during testing

       -->do an additional bias correction using the  residual bias level measured for each amplifier from the
       steadiest pixels in the horizontal overscan and subtracted fom the pixels for that amplifier.

       ---> convert into electrons at the end
       ---> add supplemental bias info to the header

       allocate contiguous 2d array on the heap
       with pointers and return the pointer to the head of the array

       The Following macros are used to represent 2-d indexing.
       Two dimensional arrays are stored in FITS order.

       ny
       ^
       N | a05   a15   a25   a35
       A | a04   a14   a24   a34
       X | a03   a13   a23   a33
       I | a02   a12   a22   a32
       S | a01   a11   a21   a31
       2 | a00   a10   a20   a30
       ---------------------------> nx
       NAXIS1

       NAXIS1 is 4 and NAXIS2 is 6
       PIX(a,1,4) accesses a14

       In the raz image, each quadrant has been rotated such that the readout amp is located at the lower left.
       The reoriented four quadrants are then arranged into a single 8412x2070 image (science pixels plus overscan),
       with amps C, D, A, and B, in that order. In the raz image, pixels are all parallel-shifted down,
       then serial-shifted to the left.

*/
    extern int status;

    int i,j,k;              /*loop counters*/
    int subcol = (RAZ_COLS/NAMPS); /* for looping over quads */
    /* NOTE(review): duplicate declaration of status (also declared above) —
       harmless in C, kept as-is. */
    extern int status;      /* variable for return status */
    float bias_post[NAMPS];
    float bsig_post[NAMPS];
    float bias_pre[NAMPS];
    float bsig_pre[NAMPS];
    float gain;

    /*INIT THE ARRAYS*/
    for(i=0;i<NAMPS;i++){
        bias_post[i]=0.;
        bsig_post[i]=0.;
        bias_pre[i]=0.;
        bsig_pre[i]=0.;
    }

    gain=wf3->ccdgain;

    /*REFORMAT TO RAZ*/
    makeRAZ(cd,ab,raz);

    /*SUBTRACT THE EXTRA BIAS CALCULATED, AND MULTIPLY BY THE GAIN
      Note that for user subarray the image is in only 1 quad, and only
      has prescan bias pixels so the regions are different for full and subarrays
    */
    if (wf3->subarray){
        findPreScanBias(raz, bias_pre, bsig_pre);
        for (k=0;k<NAMPS;k++){
            for (i=0; i<subcol;i++){
                for (j=0;j<RAZ_ROWS; j++){
                    /* Only correct pixels flagged valid in the subarray mask. */
                    if(Pix(raz->dq.data,i+k*subcol,j)){
                        Pix(raz->sci.data,i+k*subcol,j) -= bias_pre[k];
                        Pix(raz->sci.data,i+k*subcol,j) *= gain;
                    }
                }
            }
        }
    } else {
        findPostScanBias(raz, bias_post, bsig_post);
        for (k=0;k<NAMPS;k++){
            for (i=0; i<subcol;i++){
                for (j=0;j<RAZ_ROWS; j++){
                    Pix(raz->sci.data,i+k*subcol,j) -= bias_post[k];
                    Pix(raz->sci.data,i+k*subcol,j) *= gain;
                }
            }
        }
    }

    return(status);
}

/*calculate the post scan and bias after the biac file has been subtracted
  add some history information to the header

  Jay gave no explanation why plist is limited to 55377 for full arrays, his
  subarray limitation was just 1/4 of this value.  The value 55377 is the number
  of post-scan pixels in the physical pixel vertical extent (27 x 2051 = 55377).
  Value 2051 is the veritical number of science pixels in an amp, and 27 is the
  30 post-scan pixels with two pixels stripped from the left boundary and one
  stripped from the right boundary.
the serial virtual overscan pixels are also called the trailing-edge pixels these only exist in full frame images */ int findPostScanBias(SingleGroup *raz, float *mean, float *sigma){ extern int status; int arrsize = 55377; int i,j,k; /*Looping variables */ float plist[arrsize]; /*bias bpixels to measure*/ float *plistSub; float min=0.0; float max=0.0; float rmean=0.0; float rsigma=0.0; float sigreg =7.5; /*sigma clip*/ int subcol = RAZ_COLS/4; int npix=0; /*track array size for resistant mean*/ /*init plist for full size We'll allocate heap memory for smaller arrays */ for (i=0;i<arrsize;i++){ plist[i]=0.; } for (k=0;k<NAMPS;k++){ /*for each quadrant cdab = 0123*/ npix=0; /*reset for each quad*/ rmean=0.; rsigma=0.; for (i=RAZ_ROWS+5;i<= subcol-1; i++){ /*quad area for post scan bias pixels*/ for (j=0; j<YAMP_SCI_DIM; j++){ if (npix < arrsize){ if ( Pix(raz->dq.data,i+k*subcol,j)) { plist[npix] = Pix(raz->sci.data,i+k*subcol,j); npix+=1; } } } } if (npix > 0 ){ plistSub = (float *) calloc(npix, sizeof(float)); if (plistSub == NULL){ trlerror("out of memory for resistmean entrance in findPostScanBias."); free(plistSub); return (ERROR_RETURN); } for(i=0; i<npix; i++){ plistSub[i]=plist[i]; } resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max); free(plistSub); } mean[k]= rmean; sigma[k] = rsigma; } return status; } /*CALCULATE THE PRE SCAN AND BIAS AFTER THE BIAC FILE HAS BEEN SUBTRACTED The serial physical overscan pixels are also known as the serial prescan, they are the only pixels available for subarrays. For full frame arrays the prescan is not used as part of the correction, instead the virtual overscan pixels are used and modeled in findPostScanBias. 
*/

int findPreScanBias(SingleGroup *raz, float *mean, float *sigma)
{
    /* This calls resistmean, which does a better job clipping outlying
       pixels than a standard single-pass stddev clip. */

    extern int status;

    int arrsize = 55377;    /* upper bound on prescan pixels collected per amp */
    int i, j, k;            /* loop counters */
    float plist[arrsize];   /* bias pixels to measure (VLA) */
    float *plistSub;        /* heap allocation for the variable-size list */
    float min = 0.0;
    float max = 0.0;
    float rmean;
    float rsigma;
    float sigreg = 7.5;     /* sigma-clip limit */
    int subcol = RAZ_COLS/4;
    int npix = 0;           /* track array size for the resistant mean */

    /* init plist */
    for (i = 0; i < arrsize; i++)
        plist[i] = 0.;

    for (k = 0; k < NAMPS; k++) {   /* for each quadrant, CDAB ordered */
        npix = 0;
        rmean = 0.;
        rsigma = 0.;

        /* serial prescan columns 5 .. P_OVRSCN-1, all rows */
        for (i = 5; i < P_OVRSCN; i++) {
            for (j = 0; j < YAMP_SCI_DIM; j++) {
                if (npix < arrsize) {
                    if (Pix(raz->dq.data, i + (k*subcol), j)) {
                        plist[npix] = Pix(raz->sci.data, i + k*subcol, j);
                        npix += 1;
                    }
                }
            }
        }

        if (0 < npix) {
            plistSub = (float *) calloc(npix, sizeof(float));
            if (plistSub == NULL) {
                /* allocation failed: nothing to free (free(NULL) was a
                   redundant no-op in the original) */
                trlerror("out of memory for resistmean entrance in findPreScanBias.");
                return (ERROR_RETURN);
            }
            for (i = 0; i < npix; i++)
                plistSub[i] = plist[i];
            resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max);
            free(plistSub);
        }

        mean[k]  = rmean;
        sigma[k] = rsigma;

        if (npix > 0)
            printf("npix=%i\nmean[%i]=%f\nsigma[%i] = %f\n",npix,k+1,rmean,k+1,rsigma);
    }

    return status;
}

/* This is the workhorse subroutine; it simulates the readout of one
   column pixi() and outputs this to pixo() using a single iteration.
   It can be called successively to do the transfer in steps.
JDIM == RAZ_ROWS WDIM == TRAPS Ws is the input traps number < WsMAX NITs == cte_pars->n_par These are already in the parameter structure CTEParams int Ws the number of traps < WsMAX float q_w[TRAPS]; the run of charge with level == qlevq_data float dpde_w[TRAPS]; the run of charge loss with level == dpdew_data float rprof_wt[TRAPS][100]; the emission probability as fn of downhill pixel == rprof fits image float cprof_wt[TRAPS][100]; the cumulative probability cprof_t( 1) = 1. - rprof_t(1) == cprof fits image W = wcol_data = trap id q_w[TRAP] = qlev_q from QPROF traps as function of packet size = cte->qlevq_data[TRAP] pixi (curr), pixo (read) , pixf(cteff) are passed and are 1d arrays which have values for a particular column the ttrap reference to the image array has to be -1 for C */ int sim_colreadout_l(double *pixi, double *pixo, double *pixf, CTEParams *cte){ extern int status; int j; int ttrap; int w; double ftrap; double pix_1; double padd_2; double padd_3; double prem_3; double pmax; double fcarry; padd_3=0.0; prem_3=0.0; padd_2=0.0; fcarry=0.0; pix_1=0.0; w=0; j=0; ftrap=0.0; ttrap=0; FloatHdrData *rprof; FloatHdrData *cprof; /*from the reference table*/ rprof = cte->rprof; cprof = cte->cprof; /*FIGURE OUT WHICH TRAPS WE DON'T NEED TO WORRY ABOUT IN THIS COLUMN PMAX SHOULD ALWAYS BE POSITIVE HERE */ pmax=10.; for(j=0; j<RAZ_ROWS; j++){ pixo[j] = pixi[j]; if (pixo[j] > pmax) pmax=pixo[j]; } /*GO THROUGH THE TRAPS ONE AT A TIME, FROM HIGHEST TO LOWEST Q, AND SEE WHEN THEY GET FILLED AND EMPTIED, ADJUST THE PIXELS ACCORDINGLY*/ for (w = cte->cte_traps-1; w>=0; w--){ if ( cte->qlevq_data[w] <= pmax ) { ftrap = 0.0e0; ttrap = cte->cte_len; /*for referencing the image at 0*/ fcarry = 0.0e0; /*GO UP THE COLUMN PIXEL BY PIXEL*/ for(j=0; j<RAZ_ROWS;j++){ pix_1 = pixo[j]; if ( (ttrap < cte->cte_len) || ( pix_1 >= cte->qlevq_data[w] - 1. 
) ){ if (pixo[j] >= 0 ){ pix_1 = pixo[j] + fcarry; /*shuffle charge in*/ fcarry = pix_1 - floor(pix_1); /*carry the charge remainder*/ pix_1 = floor(pix_1); /*reset pixel*/ } /*HAPPENS AFTER FIRST PASS*/ /*SHUFFLE CHARGE IN*/ if ( j> 0 ) { if (pixf[j] < pixf[j-1]) ftrap *= (pixf[j] / pixf[j-1]); } /*RELEASE THE CHARGE*/ padd_2=0.0; if (ttrap <cte->cte_len){ ttrap += 1; padd_2 = Pix(rprof->data,w,ttrap-1) *ftrap; } padd_3 = 0.0; prem_3 = 0.0; if ( pix_1 >= cte->qlevq_data[w]){ prem_3 = cte->dpdew_data[w] / cte->n_par * pixf[j]; /*dpdew is 1 in file */ if (ttrap < cte->cte_len) padd_3 = Pix(cprof->data,w,ttrap-1)*ftrap; ttrap=0; ftrap=prem_3; } pixo[j] += padd_2 + padd_3 - prem_3; } /*replaces trap continue*/ }/*end if j>0*/ }/* end if qlevq > pmax, replaces continue*/ }/*end for w*/ return(status); } int initCTETrl (char *input, char *output) { extern int status; char trl_in[CHAR_LINE_LENGTH+1]; /* trailer filename for input */ char trl_out[CHAR_LINE_LENGTH+1]; /* output trailer filename */ int exist; int MkName (char *, char *, char *, char *, char *, int); int TrlExists (char *); /* Initialize internal variables */ trl_in[0] = '\0'; trl_out[0] = '\0'; exist = EXISTS_UNKNOWN; /* Input and output suffixes. 
*/ char *isuffix[] = {"_raw"}; char *osuffix[] = {"_rac_tmp"}; char *trlsuffix[] = {""}; int nsuffix = 1; /* Start by stripping off suffix from input/output filenames */ if (MkOutName (input, isuffix, trlsuffix, nsuffix, trl_in, CHAR_LINE_LENGTH)) { WhichError (status); sprintf (MsgText, "Couldn't determine trailer filename for %s", input); trlmessage (MsgText); } if (MkOutName (output, osuffix, trlsuffix, nsuffix, trl_out, CHAR_LINE_LENGTH)) { WhichError (status); sprintf (MsgText, "Couldn't create trailer filename for %s", output); trlmessage (MsgText); } /* NOW, CONVERT TRAILER FILENAME EXTENSIONS FROM '.FITS' TO '.TRL' */ if (MkNewExtn (trl_in, TRL_EXTN) ) { sprintf (MsgText, "Error with input trailer filename %s", trl_in); trlerror (MsgText); WhichError (status); } if (MkNewExtn (trl_out, TRL_EXTN) ) { sprintf (MsgText, "Error with output trailer filename %s", trl_out); trlerror (MsgText); WhichError (status); } /* If we are working with a RAW file, then see if a TRL file needs to be overwritten after the generic conversion comments. */ if (strstr(input, isuffix[0]) != NULL) { /* Test whether the output file already exists */ exist = TrlExists(trl_out); if (exist == EXISTS_YES) { /* The output file exists, so we want to add to them ** the new trailer comments. */ SetTrlOverwriteMode (NO); } } /* Sets up temp trailer file for output and copies input ** trailer file into it. */ InitTrlFile (trl_in, trl_out); return(status); } /* #2 int sim_colreadout_l_uvis_w --- CTE correction for one column #3 int sub_ctecor_v2c --- reverse CTE correction for image */ /* ------------------------------------------- */ /* */ /* Readnoise correction for a single column. 
*/
/*                                             */
/* ------------------------------------------- */

int rm_rnZ_colj(double *pixj_chg, double *pixj_rnz, double *pixj_rsz, double RNMIT)
{
    int NIT;        /* smoothing iteration counter */
    int j;          /* row index */
    double dd;      /* deviation of a pixel from its neighbors' mean */
    double dtot;    /* accumulated squared removed-noise */
    int ntot;       /* pixels accumulated in the totals */
    double ftot;    /* count of pixels inside the readnoise window */
    double rtot;    /* accumulated squared residual */
    double RNN;     /* readnoise allowance for this iteration */
    double RNU;     /* rms of the noise removed so far */
    double sqrt();  /* old-style declaration retained from the original */
    int pixj_ffu[RAZ_ROWS];     /* flag: pixel fell inside the noise window */

    /* Start from the input column with no noise removed */
    for (j = 0; j < RAZ_ROWS; j++) {
        pixj_rsz[j] = pixj_chg[j];
        pixj_rnz[j] = 0.0;
        pixj_ffu[j] = 0.0;
    }

    /* Find the upper and lower limits where there are valid pixels;
       this is done to accommodate subarrays. */
    int j1 = XAMP_SCI_DIM-1;
    int j2 = 2;

    /* There are no "greater than zero" pixels below j1 */
    for (j = 2; j <= XAMP_SCI_DIM-1; j++) {
        if (j1 == XAMP_SCI_DIM-1 && pixj_chg[j] > 0)
            j1 = j;
    }
    /* There are no "greater than zero" pixels above j2 */
    for (j = XAMP_SCI_DIM-1; j >= 2; j--) {
        if (j2 == 2 && pixj_chg[j] > 0)
            j2 = j;
    }

    /* For each iteration, allow a bit more noise.  This way we can stop
       when just enough is allowed to go through the column from the 2nd
       pixel to the 2nd from the top. */
    for (NIT = 1; NIT < NITMAX; NIT++) {
        RNN = RNMIT*(1.00+5.0*NIT/(float)NITMAX);

        /* Bounds include only the non-zero portion of the data */
        for (j = j1; j <= j2; j++) {
            /* Compare each pixel to the average of its up/down neighbors */
            dd = pixj_rsz[j]-(pixj_rsz[j+1]+pixj_rsz[j-1])/2.0;
            pixj_ffu[j] = 0.0;

            /* If the pixel is within the readnoise window... */
            if (fabs(dd) < RNN) {
                /* Cap the adjustment we are willing to make at any one
                   time to 20% of the readnoise target */
                if (dd >  RNMIT/5.0) dd =  RNMIT/5.0;
                if (dd < -RNMIT/5.0) dd = -RNMIT/5.0;

                /* Take half of the adjustment from the current pixel... */
                pixj_rnz[j  ] = pixj_rnz[j  ] + dd*0.50;
                /* ...and give a quarter each to the pixels below and above */
                pixj_rnz[j-1] = pixj_rnz[j-1] - dd*0.25;
                pixj_rnz[j+1] = pixj_rnz[j+1] - dd*0.25;

                /* Flag this pixel as inside the readnoise range so we can
                   track the total noise in the nominal pixels */
                pixj_ffu[j  ] = 1.0;
            }
        }

        /* Rebuild the smoothed column from the accumulated noise image
           (bounds again cover only the non-zero portion of the data) */
        for (j = j1; j <= j2; j++) {
            pixj_rsz[j] = pixj_chg[j] - pixj_rnz[j];
        }

        dtot = 0.;
        ntot = 0.;
        ftot = 0.;
        rtot = 0.;
        for (j = j1; j <= j2; j++) {
            ftot = ftot + pixj_ffu[j];
            dtot = dtot + pixj_rnz[j]*pixj_rnz[j];
            dd   = pixj_rsz[j]-(pixj_rsz[j+1]+pixj_rsz[j-1])/2.0;
            rtot = rtot + dd*dd;
            ntot = ntot + 1;
        }

        /* Stop once the rms of the removed noise exceeds the target */
        RNU = sqrt(dtot/ftot);
        if (RNU > RNMIT)
            return(0);
    }

    return(0);
}

/* ---------------------------------------------------------------*/
/*                                                                */
/* CTE correction for a single column.                            */
/*                                                                */
/* TDIM is the length of the trails that are being considered.
*/ /* */ /* ---------------------------------------------------------------*/ #define _TDIM_ 60 int sim_colreadout_l_uvis_w(double *pixi, // input column array (JDIM) double *pixo, // outout column array (JDIM) double *pixf, // scaling of model for each pixel (JDIM) int J1, // bottom and top pixel in column int J2, // bottom and top pixel in column int JDIM, // number of pixels in column double *q_w, // the charge level for trap#W (_WDIM_) double *dpde_w, // the amt of charge this trap grabs (_WDIM_) int NITs, // the num of iterations (dpde-->dpde/NITs) float *rprof_wt, // the trap emission for t=T (_WDIM_,100) float *cprof_wt, // the amount left in trap after t=T emission (_WDIM_,100) int Ws) { int j; // pixel location up the column double ftrap; // total number of electrons in the trap int ttrap; // shifts since the trap was last filled int w; // trap counter double pmax; // max pixel value in the column - tells us the highest relevant trap numbrer int Wf; // highest relevant trap number double prel_1; // amount in incidental release from trap double prel_2; // amount in flush release of trap double pgrb_3; // amount grabbed by filling trap float rprof_t[_TDIM_]; float cprof_t[_TDIM_]; /* Bounds checking */ if (Ws>WsMAX) { printf("Ws error\n"); return(1); } /* Figure out which traps we do not need to worry about in this column */ pmax = 10; for(j=0;j<JDIM;j++) { pixo[j] = pixi[j]; if (pixo[j] > pmax) pmax = pixo[j]; } /* Figure out the highest trap number we need to consider */ Wf = 1; for (w=0;w<Ws;w++) { if (pmax >=q_w[w]) Wf = w; } /* Go thru the traps one at a time (from highest to lowest q) and see when they get filled and emptied; adjust the pixel values accordingly */ for (w=Wf;w>=0;w--) { // loop backwards for(ttrap=0;ttrap<_TDIM_;ttrap++) { rprof_t[ttrap] = rprof_wt[w+ttrap*WsMAX]; cprof_t[ttrap] = cprof_wt[w+ttrap*WsMAX]; } /* Initialize the flux in the trap to zero */ ftrap = 0.0; /* Initialize the time-since-flush to the max */ ttrap = _TDIM_ + 1; /* 
Go up the column, pixel-by-pixel */ for (j=J1;j<J2;j++) { /* If we have an inversion of the density (i.e., a readout-cosmic issue), then we do not want to flush too much */ if (j>J1) { if (pixf[j] < pixf[j-1]) { ftrap = pixf[j]/ pixf[j-1]*ftrap; } } /* Set up accounting of pixel value changes */ prel_1 = 0.; // charge to be released prel_2 = 0.; // charge to be flushed out pgrb_3 = 0.; // charge to be trapped /* Filled/refilled trap#W */ if (pixo[j] >= q_w[w]) { /* Need to flush before filling? */ if (ttrap < (_TDIM_)) { /* Increment time since filled */ ttrap = ttrap + 1; /* Flush out amount for this shift, and ...*/ prel_1 = rprof_t[ttrap-1]*ftrap; /* ...flush out the rest */ prel_2 = cprof_t[ttrap-1]*ftrap; } /* Amount to hold in the trap */ ftrap = dpde_w[w]/NITs*pixf[j]; /* Subtract that amount held from the pixel and reset the time-since-filled counter */ pgrb_3 = ftrap; ttrap = 0; } /* trap#W not filled at this time */ else { /* Check if the trap contains charge, and if so, then release the appropriate number of electrons */ if (ttrap < (_TDIM_)) { ttrap = ttrap + 1; prel_1 = rprof_t[ttrap-1]*ftrap; } } /* Make adjustments to the output pixel: add the trail emission, flush the trap, and fill the trap */ pixo[j] = pixo[j] + prel_1 + prel_2 - pgrb_3; } } return(0); } /* --------------------------------- */ /* */ /* CTE correction for one column. 
*/ /* */ /* --------------------------------- */ int sub_ctecor_v2c(float *pixz_raz, float *pixz_fff, int Ws, double *q_w, double *dpde_w, float *rprof_wt, float *cprof_wt, float PCTERNOI, float FIX_ROCR, int PCTENFOR, int PCTENPAR, float *pixz_rzc) { extern int status; int i; int j; int jj; int jmax; int NITFOR, NITFORs; int NITPAR, NITPARs; double RNOI; int ret; double *pixj_fff; double *pixj_raz; double *pixj_mod; double *pixj_rnz; double *pixj_rsz; double *pixj_org; double *pixj_obs; double *pixj_chg; int NCRX; int DONE; int NDONE = 0; RNOI = PCTERNOI; NITFORs = PCTENFOR; NITPARs = PCTENPAR; printf(" \n"); printf(" INSIDE sub_ctecor_v2.f... \n"); printf(" ---> PCTERNOI: %8.4f \n",PCTERNOI); printf(" ---> FIX_ROCR: %8.4f \n",FIX_ROCR); printf(" ---> NITFORs: %5d \n",NITFORs); printf(" ---> NITPARs: %5d \n",NITPARs); printf(" \n"); #pragma omp parallel \ shared(pixz_raz,pixz_fff,pixz_rzc, \ NITPARs,NITFORs, \ q_w,dpde_w, \ rprof_wt,cprof_wt,Ws,NDONE) \ private(i,j,ret,NCRX, DONE, NITFOR,NITPAR, \ pixj_fff, pixj_raz, pixj_mod, pixj_rnz, \ pixj_rsz, pixj_org, pixj_obs, pixj_chg) #pragma omp for for(i=0;i<RAZ_COLS;i++) { pixj_fff = malloc(RAZ_ROWS*8); pixj_raz = malloc(RAZ_ROWS*8); pixj_mod = malloc(RAZ_ROWS*8); pixj_rnz = malloc(RAZ_ROWS*8); pixj_rsz = malloc(RAZ_ROWS*8); pixj_org = malloc(RAZ_ROWS*8); pixj_obs = malloc(RAZ_ROWS*8); pixj_chg = malloc(RAZ_ROWS*8); for(j=0;j<RAZ_ROWS;j++) { pixj_raz[j] = pixz_raz[i+j*RAZ_COLS]; pixj_fff[j] = pixz_fff[i+j*RAZ_COLS]; } NCRX = 0; DONE = 0; while(!DONE) { NCRX = NCRX + 1; DONE = 1; for (j=0;j<RAZ_ROWS;j++) { pixj_mod[j] = pixj_raz[j]; pixj_chg[j] = 0.0; } for(NITFOR=0;NITFOR<NITFORs;NITFOR++) { ret = rm_rnZ_colj(pixj_mod,pixj_rnz,pixj_rsz,RNOI); for(j=0;j<RAZ_ROWS;j++) { pixj_org[j] = pixj_rsz[j]; } for(NITPAR=1;NITPAR<=NITPARs;NITPAR++) { ret = sim_colreadout_l_uvis_w(pixj_org, pixj_obs, pixj_fff, 1,RAZ_ROWS,RAZ_ROWS, q_w,dpde_w,NITPARs, rprof_wt,cprof_wt,Ws); for (j=0;j<RAZ_ROWS;j++) { pixj_org[j] = pixj_obs[j]; } } 
for(j=0;j<RAZ_ROWS;j++) { pixj_chg[j] = pixj_obs[j] - pixj_rsz[j]; pixj_mod[j] = pixj_raz[j] - pixj_chg[j]; } } if (FIX_ROCR<0) { for(j=15;j<=2060;j++) { if (pixj_mod[j] < FIX_ROCR && pixj_mod[j]-pixj_raz[j] < FIX_ROCR && pixj_mod[j] < pixj_mod[j+1] && pixj_mod[j] < pixj_mod[j-1]) { jmax = j; for(jj=j-2;jj<j;jj++) { if (pixj_mod[jj ]-pixj_raz[jj ] > pixj_mod[jmax]-pixj_raz[jmax]) { jmax = jj; } } if (pixj_mod[jmax]-pixj_raz[jmax] > -2.5*FIX_ROCR) { pixj_fff[jmax] = pixj_fff[jmax]*0.90; DONE = NCRX >= 10; } } } } } for(j=0;j<RAZ_ROWS;j++) { pixz_rzc[i+j*RAZ_COLS] = pixj_mod[j]; pixz_fff[i+j*RAZ_COLS] = pixj_fff[j]; } free(pixj_fff); free(pixj_raz); free(pixj_mod); free(pixj_rnz); free(pixj_rsz); free(pixj_org); free(pixj_obs); free(pixj_chg); /* This variable exists for debuggin purposes. */ NDONE++; /*if (NDONE==(NDONE/100)*100) { printf(" i = %5d %5d %5d \n",i,NCRX,NDONE); }*/ } return(status); } /* * This routine dynamically determines the noise in the input image. */ float find_raz2rnoival(float *raz_cdab, float *FLOAT_RNOIVAL, float *FLOAT_BKGDVAL) { /* Return value */ float RNOIVALo; int i, j, ik; float b, d; int ih, iih; long dhist[NUM_BINS], dcum[NUM_BINS], vtot; long vhist[NUM_BINS], vcum[NUM_BINS], dtot; int ivmin, id1, id2; int idmin, iv1, iv2; long long vsum; long long nsum; int j1, j2; float RNOIVAL; float RNOIVALu; float BKGDVAL; float BKGDVALu; FLOAT_RNOIVAL[0] = 3.33; FLOAT_BKGDVAL[0] = raz_cdab[0]; iv1 = 1; iv2 = 999; id1 = 1; id2 = 999; /* * Distill the image variation information and background into quick histograms */ for (ih=1; ih<=NUM_BINS; ih++) { dhist[ih-1] = 0; vhist[ih-1] = 0; } for (i=2; i<=RAZ_COLS; i++) { /* * Find the upper and lower limits where there are valid pixels * to accommodate subarrays */ j1 = XAMP_SCI_DIM-1; j2 = 2; /* There are no "greater than zero" pixels below j1 */ for (j=2; j<=XAMP_SCI_DIM-1; j++) { if (j1==XAMP_SCI_DIM-1 && raz_cdab[i+(j-1)*RAZ_COLS-1] > 0) j1 = j; } /* There are no "greater than zero" pixels above j2 
*/ for (j=XAMP_SCI_DIM-1; j>=2; j--) { if (j2==2 && raz_cdab[i+(j-1)*RAZ_COLS-1] > 0) j2 = j; } /* * Process the valid pixels. "ik" is the chip horizontal locator. * For each pixel in the recorded part of the detector, find the * average of the surrounding 8 pixels */ for (j=j1; j<=j2; j++) { ik = i - (i-1)/2103*2103; if (ik > 25 && ik < XAMP_SCI_DIM+24) { b = (raz_cdab[i-1+(j+1-1)*RAZ_COLS-1] +raz_cdab[i +(j+1-1)*RAZ_COLS-1] +raz_cdab[i+1+(j+1-1)*RAZ_COLS-1] +raz_cdab[i-1+(j -1)*RAZ_COLS-1] +raz_cdab[i+1+(j -1)*RAZ_COLS-1] +raz_cdab[i-1+(j-1-1)*RAZ_COLS-1] +raz_cdab[i +(j-1-1)*RAZ_COLS-1] +raz_cdab[i+1+(j-1-1)*RAZ_COLS-1])/8.00; /* Local residual as proxy for noise */ d = raz_cdab[i+(j-1)*RAZ_COLS-1] - b; /* Locate within the histogram bin; 4.5 is to spread the values out. * The value of 4.5 is 3 times the gain, where the gain is 1.5 */ ih = 501 + d*(SPREAD_FOR_HISTO) + 0.5; if (ih < 1) ih = 1; if (ih > NUM_BINS) ih = NUM_BINS; /* Increment the histogram bin */ dhist[ih-1] = dhist[ih-1] + 1; /* Locate the pixel value within the histogram bin */ ih = 501 + raz_cdab[i+(j-1)*RAZ_COLS-1]*(SPREAD_FOR_HISTO) + 0.5; if (ih < 1) ih = 1; if (ih > NUM_BINS) ih = NUM_BINS; /* Increment the histogram bin */ vhist[ih-1] = vhist[ih-1] + 1; } } } /* Compute the cumulative distribution for the noise and for the background */ dtot = 0; vtot = 0; for (ih=1; ih<=NUM_BINS; ih++) { if (ih > 1 && ih < NUM_BINS) { dtot = dtot + dhist[ih-1]; vtot = vtot + vhist[ih-1]; } dcum[ih-1] = dtot; vcum[ih-1] = vtot; } idmin = 999; ivmin = 999; /* * Find the closest 75% of the points and use them to determine the noise * and the background */ for (ih=1; ih<=NUM_BINS-1; ih++) { for (iih=ih+1; iih<=NUM_BINS-1; iih++) { if (dcum[iih-1]-dcum[ih-1] > 0.75*dtot && iih-ih < idmin) { id1 = ih; id2 = iih; idmin = iih-ih; } if (vcum[iih-1]-vcum[ih-1] > 0.75*vtot && iih-ih < ivmin) { iv1 = ih; iv2 = iih; ivmin = iih-ih; } } } nsum = 0; vsum = 0; for (ih=iv1; ih<=iv2; ih++) { nsum = nsum + vhist[ih-1]; vsum = 
vsum + vhist[ih-1]*(ih-501); } if (vsum==0 || nsum==0) { RNOIVALu = 9.75 ; BKGDVALu = 999.9 ; *FLOAT_RNOIVAL = RNOIVALu; *FLOAT_BKGDVAL = BKGDVALu; return(RNOIVALu); } /* For debugging purposes only printf(" \n"); printf(" vsum: %12lld \n",vsum); printf(" nsum: %12lld \n",nsum); printf(" \n"); printf(" dbar: %12.2f \n",idmin/2.30*(SPREAD_FOR_HISTO)); printf(" vbar: %12.2f %12lld %12lld \n",vsum/nsum*(SPREAD_FOR_HISTO),vsum,nsum); printf(" \n"); */ RNOIVAL = (int)(idmin/2.30/(SPREAD_FOR_HISTO)/sqrt(1+1/8.0)*4+0.5)/4.00; RNOIVAL = idmin/2.30/(SPREAD_FOR_HISTO)/sqrt(1+1/8.0); RNOIVALu = RNOIVAL; if (RNOIVALu > HIGH_CLIP) RNOIVALu = HIGH_CLIP; BKGDVAL = 1.*vsum/nsum/(SPREAD_FOR_HISTO); BKGDVALu = BKGDVAL; if (BKGDVALu > 999.9) BKGDVALu = 999.9; /* Values which can be used for diagnostic analysis */ *FLOAT_RNOIVAL = RNOIVALu; *FLOAT_BKGDVAL = BKGDVALu; /* LOW_CLIP and HIGH_CLIP are imposed limits on the computed noise value */ RNOIVALo = RNOIVALu; if (RNOIVALo < LOW_CLIP) RNOIVALo = LOW_CLIP; if (RNOIVALo > HIGH_CLIP) RNOIVALo = HIGH_CLIP; return(RNOIVALo); }
/* ==================== graph.c ==================== */
/*!
 * \file
 *
 * \brief Various routines with dealing with sparse graphs
 *
 * \author George Karypis
 */

#include <GKlib.h>

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocate memory for a graph and initializes it
    \returns the allocated graph. The various fields are set to NULL.
*/
/**************************************************************************/
gk_graph_t *gk_graph_Create()
{
  gk_graph_t *graph;

  graph = (gk_graph_t *)gk_malloc(sizeof(gk_graph_t), "gk_graph_Create: graph");
  gk_graph_Init(graph);

  return graph;
}


/*************************************************************************/
/*! Initializes the graph.
    \param graph is the graph to be initialized.
*/
/*************************************************************************/
void gk_graph_Init(gk_graph_t *graph)
{
  memset(graph, 0, sizeof(gk_graph_t));
  graph->nvtxs = -1;    /* -1 marks an uninitialized/empty graph */
}


/*************************************************************************/
/*! Frees all the memory allocated for a graph.
    \param graph is the graph to be freed.
*/
/*************************************************************************/
void gk_graph_Free(gk_graph_t **graph)
{
  if (*graph == NULL)
    return;
  gk_graph_FreeContents(*graph);
  gk_free((void **)graph, LTERM);
}


/*************************************************************************/
/*! Frees only the memory allocated for the graph's different fields and
    sets them to NULL.
    \param graph is the graph whose contents will be freed.
*/
/*************************************************************************/
void gk_graph_FreeContents(gk_graph_t *graph)
{
  /* first argument cast fixed to (void **) to match gk_free's
     signature and the style of the other arguments */
  gk_free((void **)&graph->xadj, &graph->adjncy,
          &graph->iadjwgt, &graph->fadjwgt,
          &graph->ivwgts, &graph->fvwgts,
          &graph->ivsizes, &graph->fvsizes,
          &graph->vlabels,
          LTERM);
}


/**************************************************************************/
/*! Reads a sparse graph from the supplied file
    \param filename is the file that stores the data.
\param format is the graph format. The supported values are: GK_GRAPH_FMT_METIS. \param isfewgts is 1 if the edge-weights should be read as floats \param isfvwgts is 1 if the vertex-weights should be read as floats \param isfvsizes is 1 if the vertex-sizes should be read as floats \returns the graph that was read. */ /**************************************************************************/ gk_graph_t *gk_graph_Read(char *filename, int format, int isfewgts, int isfvwgts, int isfvsizes) { ssize_t i, k, l; size_t nfields, nvtxs, nedges, fmt, ncon, lnlen; int32_t ival; float fval; int readsizes=0, readwgts=0, readvals=0, numbering=0; char *line=NULL, *head, *tail, fmtstr[256]; FILE *fpin=NULL; gk_graph_t *graph=NULL; if (!gk_fexists(filename)) gk_errexit(SIGERR, "File %s does not exist!\n", filename); if (format == GK_GRAPH_FMT_METIS) { fpin = gk_fopen(filename, "r", "gk_graph_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); fmt = ncon = 0; nfields = sscanf(line, "%zu %zu %zu %zu", &nvtxs, &nedges, &fmt, &ncon); if (nfields < 2) gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n"); nedges *= 2; if (fmt > 111) gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt); sprintf(fmtstr, "%03zu", fmt%1000); readsizes = (fmtstr[0] == '1'); readwgts = (fmtstr[1] == '1'); readvals = (fmtstr[2] == '1'); numbering = 1; ncon = (ncon == 0 ? 
1 : ncon); } else { gk_errexit(SIGERR, "Unrecognized format: %d\n", format); } graph = gk_graph_Create(); graph->nvtxs = nvtxs; graph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Read: xadj"); graph->adjncy = gk_i32malloc(nedges, "gk_graph_Read: adjncy"); if (readvals) { if (isfewgts) graph->fadjwgt = gk_fmalloc(nedges, "gk_graph_Read: fadjwgt"); else graph->iadjwgt = gk_i32malloc(nedges, "gk_graph_Read: iadjwgt"); } if (readsizes) { if (isfvsizes) graph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Read: fvsizes"); else graph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Read: ivsizes"); } if (readwgts) { if (isfvwgts) graph->fvwgts = gk_fmalloc(nvtxs*ncon, "gk_graph_Read: fvwgts"); else graph->ivwgts = gk_i32malloc(nvtxs*ncon, "gk_graph_Read: ivwgts"); } /*---------------------------------------------------------------------- * Read the sparse graph file *---------------------------------------------------------------------*/ numbering = (numbering ? - 1 : 0); for (graph->xadj[0]=0, k=0, i=0; i<nvtxs; i++) { do { if (gk_getline(&line, &lnlen, fpin) == -1) gk_errexit(SIGERR, "Pregraphure end of input file: file while reading row %d\n", i); } while (line[0] == '%'); head = line; tail = NULL; /* Read vertex sizes */ if (readsizes) { if (isfvsizes) { #ifdef __MSC__ graph->fvsizes[i] = (float)strtod(head, &tail); #else graph->fvsizes[i] = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1); if (graph->fvsizes[i] < 0) gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1); } else { graph->ivsizes[i] = strtol(head, &tail, 0); if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1); if (graph->ivsizes[i] < 0) gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1); } head = tail; } /* Read vertex weights */ if (readwgts) { for (l=0; l<ncon; l++) { if (isfvwgts) { #ifdef __MSC__ graph->fvwgts[i*ncon+l] = (float)strtod(head, &tail); #else 
graph->fvwgts[i*ncon+l] = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights " "for the %d constraints.\n", i+1, ncon); if (graph->fvwgts[i*ncon+l] < 0) gk_errexit(SIGERR, "The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l); } else { graph->ivwgts[i*ncon+l] = strtol(head, &tail, 0); if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights " "for the %d constraints.\n", i+1, ncon); if (graph->ivwgts[i*ncon+l] < 0) gk_errexit(SIGERR, "The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l); } head = tail; } } /* Read the rest of the row */ while (1) { ival = (int)strtol(head, &tail, 0); if (tail == head) break; head = tail; if ((graph->adjncy[k] = ival + numbering) < 0) gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i); if (readvals) { if (isfewgts) { #ifdef __MSC__ fval = (float)strtod(head, &tail); #else fval = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k); graph->fadjwgt[k] = fval; } else { ival = strtol(head, &tail, 0); if (tail == head) gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k); graph->iadjwgt[k] = ival; } head = tail; } k++; } graph->xadj[i+1] = k; } if (k != nedges) gk_errexit(SIGERR, "gk_graph_Read: Something wrong with the number of edges in " "the input file. nedges=%zd, Actualnedges=%zd.\n", nedges, k); gk_fclose(fpin); gk_free((void **)&line, LTERM); return graph; } /**************************************************************************/ /*! Writes a graph into a file. \param graph is the graph to be written, \param filename is the name of the output file. \param format is one of GK_GRAPH_FMT_METIS specifying the format of the output file. 
*/
/**************************************************************************/
void gk_graph_Write(gk_graph_t *graph, char *filename, int format)
{
  /* Writes the graph in METIS format to 'filename' (or stdout when filename
     is NULL).  Vertex sizes/weights and edge weights are emitted only when
     the corresponding arrays are present; the integer array of each pair
     takes precedence over its float counterpart. */
  FILE *fpout;
  int wrt_vsizes, wrt_vwgts, wrt_ewgts;
  ssize_t vi, e;

  if (format != GK_GRAPH_FMT_METIS)
    gk_errexit(SIGERR, "Unknown file format. %d\n", format);

  fpout = (filename ? gk_fopen(filename, "w", "gk_graph_Write: fpout") : stdout);

  wrt_ewgts  = (graph->iadjwgt || graph->fadjwgt);
  wrt_vwgts  = (graph->ivwgts  || graph->fvwgts);
  wrt_vsizes = (graph->ivsizes || graph->fvsizes);

  /* header line: nvtxs, nedges (each undirected edge is stored twice in
     adjncy, hence the /2), plus the optional fmt flags */
  fprintf(fpout, "%d %zd", graph->nvtxs, graph->xadj[graph->nvtxs]/2);
  if (wrt_vwgts || wrt_vsizes || wrt_ewgts)
    fprintf(fpout, " %d%d%d", wrt_vsizes, wrt_vwgts, wrt_ewgts);
  fprintf(fpout, "\n");

  /* one line per vertex: [size] [weight] (adj [ewgt])* */
  for (vi=0; vi<graph->nvtxs; vi++) {
    if (wrt_vsizes) {
      if (graph->ivsizes)
        fprintf(fpout, " %d", graph->ivsizes[vi]);
      else
        fprintf(fpout, " %f", graph->fvsizes[vi]);
    }

    if (wrt_vwgts) {
      if (graph->ivwgts)
        fprintf(fpout, " %d", graph->ivwgts[vi]);
      else
        fprintf(fpout, " %f", graph->fvwgts[vi]);
    }

    for (e=graph->xadj[vi]; e<graph->xadj[vi+1]; e++) {
      fprintf(fpout, " %d", graph->adjncy[e]+1);  /* back to 1-based IDs */
      if (wrt_ewgts) {
        if (graph->iadjwgt)
          fprintf(fpout, " %d", graph->iadjwgt[e]);
        else
          fprintf(fpout, " %f", graph->fadjwgt[e]);
      }
    }
    fprintf(fpout, "\n");
  }

  if (filename)
    gk_fclose(fpout);
}


/*************************************************************************/
/*! Returns a copy of a graph.

    \param graph is the graph to be duplicated.
    \returns the newly created copy of the graph.
*/
/**************************************************************************/
gk_graph_t *gk_graph_Dup(gk_graph_t *graph)
{
  /* Deep copy: every array that is present (non-NULL) in the input graph is
     duplicated; absent arrays remain NULL in the returned copy. */
  gk_graph_t *clone = gk_graph_Create();
  int nvtxs = graph->nvtxs;

  clone->nvtxs = nvtxs;

  /* adjacency index */
  if (graph->xadj)
    clone->xadj = gk_zcopy(nvtxs+1, graph->xadj,
                      gk_zmalloc(nvtxs+1, "gk_graph_Dup: xadj"));

  /* per-vertex integer arrays */
  if (graph->ivwgts)
    clone->ivwgts = gk_i32copy(nvtxs, graph->ivwgts,
                      gk_i32malloc(nvtxs, "gk_graph_Dup: ivwgts"));
  if (graph->ivsizes)
    clone->ivsizes = gk_i32copy(nvtxs, graph->ivsizes,
                      gk_i32malloc(nvtxs, "gk_graph_Dup: ivsizes"));
  if (graph->vlabels)
    clone->vlabels = gk_i32copy(nvtxs, graph->vlabels,
                      gk_i32malloc(nvtxs, "gk_graph_Dup: ivlabels"));

  /* per-vertex float arrays */
  if (graph->fvwgts)
    clone->fvwgts = gk_fcopy(nvtxs, graph->fvwgts,
                      gk_fmalloc(nvtxs, "gk_graph_Dup: fvwgts"));
  if (graph->fvsizes)
    clone->fvsizes = gk_fcopy(nvtxs, graph->fvsizes,
                      gk_fmalloc(nvtxs, "gk_graph_Dup: fvsizes"));

  /* per-edge arrays, sized by xadj[nvtxs] (total adjacency entries) */
  if (graph->adjncy)
    clone->adjncy = gk_i32copy(graph->xadj[nvtxs], graph->adjncy,
                      gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Dup: adjncy"));
  if (graph->iadjwgt)
    clone->iadjwgt = gk_i32copy(graph->xadj[nvtxs], graph->iadjwgt,
                      gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Dup: iadjwgt"));
  if (graph->fadjwgt)
    clone->fadjwgt = gk_fcopy(graph->xadj[nvtxs], graph->fadjwgt,
                      gk_fmalloc(graph->xadj[nvtxs], "gk_graph_Dup: fadjwgt"));

  return clone;
}


/*************************************************************************/
/*! Returns a subgraph containing a set of consecutive vertices.

    \param graph is the original graph.
    \param vstart is the starting vertex.
    \param nvtxs is the number of vertices from vstart to extract.
    \returns the newly created subgraph.
*/
/**************************************************************************/
gk_graph_t *gk_graph_ExtractSubgraph(gk_graph_t *graph, int vstart, int nvtxs)
{
  ssize_t i;
  gk_graph_t *ngraph;

  /* the requested window [vstart, vstart+nvtxs) must lie within the graph */
  if (vstart+nvtxs > graph->nvtxs)
    return NULL;

  ngraph = gk_graph_Create();

  ngraph->nvtxs = nvtxs;

  /* copy the adjancy structure */
  if (graph->xadj)
    ngraph->xadj = gk_zcopy(nvtxs+1, graph->xadj+vstart,
                            gk_zmalloc(nvtxs+1, "gk_graph_ExtractSubgraph: xadj"));
  /* Re-base the copied xadj so that it starts at 0.  The loop MUST run
     downward: xadj[0] holds the offset being subtracted, so it has to be
     adjusted last.
     NOTE(review): this dereferences ngraph->xadj unconditionally — if
     graph->xadj were NULL this would crash; callers appear to always supply
     xadj (confirm). */
  for (i=nvtxs; i>=0; i--)
    ngraph->xadj[i] -= ngraph->xadj[0];
  ASSERT(ngraph->xadj[0] == 0);

  /* per-vertex arrays are plain windowed copies */
  if (graph->ivwgts)
    ngraph->ivwgts = gk_i32copy(nvtxs, graph->ivwgts+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivwgts"));
  if (graph->ivsizes)
    ngraph->ivsizes = gk_i32copy(nvtxs, graph->ivsizes+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivsizes"));
  if (graph->vlabels)
    ngraph->vlabels = gk_i32copy(nvtxs, graph->vlabels+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: vlabels"));
  if (graph->fvwgts)
    ngraph->fvwgts = gk_fcopy(nvtxs, graph->fvwgts+vstart,
                            gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvwgts"));
  if (graph->fvsizes)
    ngraph->fvsizes = gk_fcopy(nvtxs, graph->fvsizes+vstart,
                            gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvsizes"));

  ASSERT(ngraph->xadj[nvtxs] == graph->xadj[vstart+nvtxs]-graph->xadj[vstart]);

  /* per-edge arrays: copy the adjacency window of the original graph.
     NOTE(review): adjncy entries are NOT remapped, so edges leaving the
     extracted window keep their original (now out-of-range) endpoint IDs —
     confirm this is the intended contract. */
  if (graph->adjncy)
    ngraph->adjncy = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->adjncy+graph->xadj[vstart],
                            gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                     "gk_graph_ExtractSubgraph: adjncy"));
  if (graph->iadjwgt)
    ngraph->iadjwgt = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->iadjwgt+graph->xadj[vstart],
                            gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                     "gk_graph_ExtractSubgraph: iadjwgt"));
  if (graph->fadjwgt)
    ngraph->fadjwgt = gk_fcopy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->fadjwgt+graph->xadj[vstart],
                            gk_fmalloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                     "gk_graph_ExtractSubgraph: fadjwgt"));

  return ngraph;
}
/*************************************************************************/
/*! Returns a graph that has been reordered according to the permutation.

    \param[IN] graph is the graph to be re-ordered.
    \param[IN] perm is the new ordering of the graph's vertices
    \param[IN] iperm is the original ordering of the re-ordered graph's vertices
    \returns the newly created copy of the graph.

    \note Either perm or iperm can be NULL but not both.
*/
/**************************************************************************/
gk_graph_t *gk_graph_Reorder(gk_graph_t *graph, int32_t *perm, int32_t *iperm)
{
  ssize_t j, jj, *xadj;
  int i, k, u, v, nvtxs;
  int freeperm=0, freeiperm=0;
  int32_t *adjncy;
  gk_graph_t *ngraph;

  /* at least one of the two orderings is required */
  if (perm == NULL && iperm == NULL)
    return NULL;

  ngraph = gk_graph_Create();

  ngraph->nvtxs = nvtxs = graph->nvtxs;

  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* allocate memory for the different structures that are present in graph */
  if (graph->xadj)
    ngraph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Reorder: xadj");
  if (graph->ivwgts)
    ngraph->ivwgts = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivwgts");
  if (graph->ivsizes)
    ngraph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivsizes");
  if (graph->vlabels)
    ngraph->vlabels = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivlabels");
  if (graph->fvwgts)
    ngraph->fvwgts = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvwgts");
  if (graph->fvsizes)
    ngraph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvsizes");
  if (graph->adjncy)
    ngraph->adjncy = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: adjncy");
  if (graph->iadjwgt)
    ngraph->iadjwgt = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: iadjwgt");
  if (graph->fadjwgt)
    ngraph->fadjwgt = gk_fmalloc(graph->xadj[nvtxs], "gk_graph_Reorder: fadjwgt");

  /* create perm/iperm if not provided; each is the inverse of the other */
  if (perm == NULL) {
    freeperm = 1;
    perm = gk_i32malloc(nvtxs, "gk_graph_Reorder: perm");
    for (i=0; i<nvtxs; i++)
      perm[iperm[i]] = i;
  }
  if (iperm == NULL) {
    freeiperm = 1;
    iperm = gk_i32malloc(nvtxs, "gk_graph_Reorder: iperm");
    for (i=0; i<nvtxs; i++)
      iperm[perm[i]] = i;
  }

  /* fill-in the information of the re-ordered graph: new vertex v is the
     old vertex u = iperm[v]; adjacency endpoints are remapped via perm */
  ngraph->xadj[0] = jj = 0;
  for (v=0; v<nvtxs; v++) {
    u = iperm[v];
    for (j=xadj[u]; j<xadj[u+1]; j++, jj++) {
      ngraph->adjncy[jj] = perm[adjncy[j]];
      if (graph->iadjwgt)
        ngraph->iadjwgt[jj] = graph->iadjwgt[j];
      if (graph->fadjwgt)
        ngraph->fadjwgt[jj] = graph->fadjwgt[j];
    }
    if (graph->ivwgts)
      ngraph->ivwgts[v] = graph->ivwgts[u];
    if (graph->fvwgts)
      ngraph->fvwgts[v] = graph->fvwgts[u];
    if (graph->ivsizes)
      ngraph->ivsizes[v] = graph->ivsizes[u];
    if (graph->fvsizes)
      ngraph->fvsizes[v] = graph->fvsizes[u];
    if (graph->vlabels)
      ngraph->vlabels[v] = graph->vlabels[u];

    ngraph->xadj[v+1] = jj;
  }

  /* free memory (only the arrays this function created itself) */
  if (freeperm)
    gk_free((void **)&perm, LTERM);
  if (freeiperm)
    gk_free((void **)&iperm, LTERM);

  return ngraph;
}


/*************************************************************************/
/*! This function finds the connected components in a graph.

    \param graph is the graph structure
    \param cptr is the ptr structure of the CSR representation of the
           components. The length of this vector must be graph->nvtxs+1.
    \param cind is the indices structure of the CSR representation of
           the components. The length of this vector must be graph->nvtxs.

    \returns the number of components that it found.

    \note The cptr and cind parameters can be NULL, in which case only the
          number of connected components is returned.
*/
/*************************************************************************/
int gk_graph_FindComponents(gk_graph_t *graph, int32_t *cptr, int32_t *cind)
{
  ssize_t i, ii, j, jj, k, nvtxs, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  int32_t mustfree_ccsr=0, mustfree_where=0;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* Deal with NULL supplied cptr/cind vectors.
     NOTE(review): if cptr is non-NULL but cind is NULL, cind is written
     below — callers appear expected to pass both or neither; confirm. */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_graph_FindComponents: cptr");
    cind = gk_i32malloc(nvtxs, "gk_graph_FindComponents: cind");
    mustfree_ccsr = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: pos"));

  /* Find the connected components via repeated BFS-style expansion */
  ncmps = -1;
  ntodo = nvtxs;     /* All vertices have not been visited */
  first = last = 0;  /* Point to the first and last vertices that have been
                        touched but not explored.
                        These vertices are stored in cind[first]...cind[last-1]. */
  while (ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */
      ASSERT(pos[todo[0]] != -1);
      i = todo[0];

      cind[last++] = i;
      pos[i]       = -1;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */

    /* Remove i from the todo list and put the last item in the todo
       list at the position that i was, so that the todo list stays
       consecutive. The pos[] array is updated accordingly to keep track
       of the location of the vertices in the todo[] list. */
    k      = pos[i];
    j      = todo[k] = todo[--ntodo];
    pos[j] = k;

    /* touch all unvisited neighbors of i */
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {
        cind[last++] = k;
        pos[k]       = -1;
      }
    }
  }
  cptr[++ncmps] = first;  /* close the last component */

  if (mustfree_ccsr)
    gk_free((void **)&cptr, &cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}


/*************************************************************************/
/*! This function computes a permutation of the vertices based on a
    breadth-first-traversal. It can be used for re-ordering the graph to
    reduce its bandwidth for better cache locality. The algorithm used is
    a simplified version of the method used to find the connected
    components.

    \param[IN] graph is the graph structure
    \param[IN] v is the starting vertex of the BFS
    \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph.
    \param[OUT] iperm[i] stores the ID of the vertex that corresponds to
           the ith vertex in the re-ordered graph.

    \note The perm or iperm (but not both) can be NULL, at which point, the
          corresponding arrays are not returned. Though the program works
          fine when both are NULL, doing that is not smart. The returned
          arrays should be freed with gk_free().
*/
/*************************************************************************/
void gk_graph_ComputeBFSOrdering(gk_graph_t *graph, int v, int32_t **r_perm,
          int32_t **r_iperm)
{
  ssize_t j, *xadj;
  int i, k, nvtxs, first, last;
  int32_t *adjncy, *cot, *pos;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* This array will function like pos + touched of the CC method */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: pos"));

  /* This array ([C]losed[O]pen[T]odo => cot) serves three purposes.
     Positions from [0...first) is the current iperm[] vector of the explored vertices;
     Positions from [first...last) is the OPEN list (i.e., visited vertices);
     Positions from [last...nvtxs) is the todo list. */
  cot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: cot"));

  /* put v at the front of the todo list (a swap with slot 0; works because
     both arrays were initialized to the identity) */
  pos[0] = cot[0] = v;
  pos[v] = cot[v] = 0;

  /* BFS over the graph, restarting for every connected component */
  first = last = 0;
  while (first < nvtxs) {
    if (first == last) { /* Find another starting vertex */
      k = cot[last];
      ASSERT(pos[k] != -1);
      pos[k] = -1; /* mark node as being visited */
      last++;
    }

    i = cot[first++];  /* the ++ advances the explored vertices */
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      /* if a node has already been visited, its perm[] will be -1 */
      if (pos[k] != -1) {
        /* pos[k] is the location within iperm of where k resides (it is in
           the 'todo' part); It is placed in that location cot[last] (end of
           OPEN list) that we are about to overwrite and update
           pos[cot[last]] to reflect that. */

        cot[pos[k]]    = cot[last]; /* put the head of the todo list to
                                       where k was in the todo list */
        pos[cot[last]] = pos[k];    /* update perm to reflect the move */

        cot[last++] = k;  /* put node at the end of the OPEN list */
        pos[k]      = -1; /* mark node as being visited */
      }
    }
  }

  /* time to decide what to return */
  if (r_perm != NULL) {
    /* use the 'pos' array to build the perm array (inverse of cot) */
    for (i=0; i<nvtxs; i++)
      pos[cot[i]] = i;

    *r_perm = pos;
    pos = NULL;  /* ownership transferred to the caller */
  }

  if (r_iperm != NULL) {
    *r_iperm = cot;
    cot = NULL;  /* ownership transferred to the caller */
  }

  /* cleanup memory (whatever was not handed to the caller) */
  gk_free((void **)&pos, &cot, LTERM);
}


/*************************************************************************/
/*! This function computes a permutation of the vertices based on a
    best-first-traversal. It can be used for re-ordering the graph to
    reduce its bandwidth for better cache locality.

    \param[IN] graph is the graph structure.
    \param[IN] v is the starting vertex of the best-first traversal.
    \param[IN] type indicates the criteria to use to measure the 'bestness'
           of a vertex.
    \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph.
\param[OUT] iperm[i] stores the ID of the vertex that corresponds to the ith vertex in the re-ordered graph. \note The perm or iperm (but not both) can be NULL, at which point, the corresponding arrays are not returned. Though the program works fine when both are NULL, doing that is not smart. The returned arrays should be freed with gk_free(). */ /*************************************************************************/ void gk_graph_ComputeBestFOrdering0(gk_graph_t *graph, int v, int type, int32_t **r_perm, int32_t **r_iperm) { ssize_t j, jj, *xadj; int i, k, u, nvtxs; int32_t *adjncy, *perm, *degrees, *minIDs, *open; gk_i32pq_t *queue; if (graph->nvtxs <= 0) return; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* the degree of the vertices in the closed list */ degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees"); /* the minimum vertex ID of an open vertex to the closed list */ minIDs = gk_i32smalloc(nvtxs, nvtxs+1, "gk_graph_ComputeBestFOrdering: minIDs"); /* the open list */ open = gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: open"); /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. */ perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm"); /* create the queue and put everything in it */ queue = gk_i32pqCreate(nvtxs); for (i=0; i<nvtxs; i++) gk_i32pqInsert(queue, i, 0); gk_i32pqUpdate(queue, v, 1); open[0] = v; /* start processing the nodes */ for (i=0; i<nvtxs; i++) { if ((v = gk_i32pqGetTop(queue)) == -1) gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i); if (perm[v] != -1) gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v); perm[v] = i; for (j=xadj[v]; j<xadj[v+1]; j++) { u = adjncy[j]; if (perm[u] == -1) { degrees[u]++; minIDs[u] = (i < minIDs[u] ? 
i : minIDs[u]); switch (type) { case 1: /* DFS */ gk_i32pqUpdate(queue, u, 1); break; case 2: /* Max in closed degree */ gk_i32pqUpdate(queue, u, degrees[u]); break; case 3: /* Sum of orders in closed list */ for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) { if (perm[adjncy[jj]] != -1) k += perm[adjncy[jj]]; } gk_i32pqUpdate(queue, u, k); break; case 4: /* Sum of order-differences (w.r.t. current number) in closed list (updated once in a while) */ for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) { if (perm[adjncy[jj]] != -1) k += (i-perm[adjncy[jj]]); } gk_i32pqUpdate(queue, u, k); break; default: ; } } } } /* time to decide what to return */ if (r_perm != NULL) { *r_perm = perm; perm = NULL; } if (r_iperm != NULL) { /* use the 'degrees' array to build the iperm array */ for (i=0; i<nvtxs; i++) degrees[perm[i]] = i; *r_iperm = degrees; degrees = NULL; } /* cleanup memory */ gk_i32pqDestroy(queue); gk_free((void **)&perm, &degrees, &minIDs, &open, LTERM); } /*************************************************************************/ /*! This function computes a permutation of the vertices based on a best-first-traversal. It can be used for re-ordering the graph to reduce its bandwidth for better cache locality. \param[IN] graph is the graph structure. \param[IN] v is the starting vertex of the best-first traversal. \param[IN] type indicates the criteria to use to measure the 'bestness' of a vertex. \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph. \param[OUT] iperm[i] stores the ID of the vertex that corresponds to the ith vertex in the re-ordered graph. \note The perm or iperm (but not both) can be NULL, at which point, the corresponding arrays are not returned. Though the program works fine when both are NULL, doing that is not smart. The returned arrays should be freed with gk_free(). 
*/
/*************************************************************************/
void gk_graph_ComputeBestFOrdering(gk_graph_t *graph, int v, int type,
          int32_t **r_perm, int32_t **r_iperm)
{
  ssize_t j, jj, *xadj;
  int i, k, u, nvtxs, nopen, ntodo;
  int32_t *adjncy, *perm, *degrees, *wdegrees, *sod, *level, *ot, *pos;
  gk_i32pq_t *queue;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* the degree of the vertices in the closed list */
  degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees");

  /* the weighted degree of the vertices in the closed list for type==3 */
  wdegrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: wdegrees");

  /* the sum of differences for type==4 */
  sod = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: sod");

  /* the encountering level of a vertex type==5 */
  level = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: level");

  /* The open+todo list of vertices.
     The vertices from [0..nopen) are the open vertices.
     The vertices from [nopen..ntodo) are the todo vertices.
     (allocation labels fixed: they used to say gk_graph_FindComponents) */
  ot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: ot"));

  /* For a vertex that has not been explored, pos[i] is the position in the
     ot list. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: pos"));

  /* if perm[i] >= 0, then perm[i] is the order of vertex i;
     otherwise perm[i] == -1. */
  perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm");

  /* create the queue and put the starting vertex in it */
  queue = gk_i32pqCreate(nvtxs);
  gk_i32pqInsert(queue, v, 1);

  /* put v at the front of the open list (swap with slot 0; works because
     ot/pos were initialized to the identity) */
  pos[0] = ot[0] = v;
  pos[v] = ot[v] = 0;
  nopen = 1;
  ntodo = nvtxs;

  /* start processing the nodes */
  for (i=0; i<nvtxs; i++) {
    if (nopen == 0) { /* deal with non-connected graphs: seed a new start */
      gk_i32pqInsert(queue, ot[0], 1);
      nopen++;
    }

    if ((v = gk_i32pqGetTop(queue)) == -1)
      gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i);

    if (perm[v] != -1)
      gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v);
    perm[v] = i;

    /* sanity checks on the open/todo bookkeeping */
    if (ot[pos[v]] != v)
      gk_errexit(SIGERR, "Something went wrong [ot[pos[%d]]!=%d.\n", v, v);
    if (pos[v] >= nopen)
      gk_errexit(SIGERR, "The position of v is not in open list. pos[%d]=%d is >=%d.\n",
          v, pos[v], nopen);

    /* remove v from the open list and re-arrange the todo part of the list */
    ot[pos[v]]       = ot[nopen-1];
    pos[ot[nopen-1]] = pos[v];
    if (ntodo > nopen) {
      ot[nopen-1]      = ot[ntodo-1];
      pos[ot[ntodo-1]] = nopen-1;
    }
    nopen--;
    ntodo--;

    for (j=xadj[v]; j<xadj[v+1]; j++) {
      u = adjncy[j];
      if (perm[u] == -1) {
        /* update ot list, if u is not in the open list, by putting it at
           the end of the open list (degrees[u]==0 means first encounter) */
        if (degrees[u] == 0) {
          ot[pos[u]]     = ot[nopen];
          pos[ot[nopen]] = pos[u];
          ot[nopen]      = u;
          pos[u]         = nopen;
          nopen++;
          level[u] = level[v]+1;
          gk_i32pqInsert(queue, u, 0);
        }

        /* update the in-closed degree */
        degrees[u]++;

        /* update the queues based on the type */
        switch (type) {
          case 1: /* DFS */
            gk_i32pqUpdate(queue, u, 1000*(i+1)+degrees[u]);
            break;
          case 2: /* Max in closed degree */
            gk_i32pqUpdate(queue, u, degrees[u]);
            break;
          case 3: /* Sum of orders in closed list */
            wdegrees[u] += i;
            gk_i32pqUpdate(queue, u, wdegrees[u]);
            break;
          case 4: /* Sum of order-differences */
            /* this is handled at the end of the loop */
            ;
            break;
          case 5: /* BFS with in degree priority */
            gk_i32pqUpdate(queue, u, -(1000*level[u] - degrees[u]));
            break;
          case 6: /* Hybrid of 1+2 */
            gk_i32pqUpdate(queue, u, (i+1)*degrees[u]);
            break;
          default:
            ;
        }
      }
    }

    if (type == 4) { /* update all the vertices in the open list */
      for (j=0; j<nopen; j++) {
        u = ot[j];
        if (perm[u] != -1)
          gk_errexit(SIGERR, "For i=%d, the open list contains a closed vertex: ot[%zd]=%d, perm[%d]=%d.\n",
              i, j, u, u, perm[u]);
        sod[u] += degrees[u];
        if (i<1000 || i%25==0)  /* throttle the (expensive) queue updates */
          gk_i32pqUpdate(queue, u, sod[u]);
      }
    }
  }

  /* time to decide what to return.
     BUGFIX: the iperm array must be built while 'perm' is still valid, so
     this branch now runs BEFORE the perm hand-off below.  Previously, when
     both r_perm and r_iperm were non-NULL, perm was set to NULL first and
     then dereferenced here, causing a NULL-pointer dereference. */
  if (r_iperm != NULL) {
    /* use the 'degrees' array to build the iperm array */
    for (i=0; i<nvtxs; i++)
      degrees[perm[i]] = i;

    *r_iperm = degrees;
    degrees = NULL;  /* ownership transferred to the caller */
  }

  if (r_perm != NULL) {
    *r_perm = perm;
    perm = NULL;  /* ownership transferred to the caller */
  }

  /* cleanup memory (whatever was not handed to the caller) */
  gk_i32pqDestroy(queue);

  gk_free((void **)&perm, &degrees, &wdegrees, &sod, &ot, &pos, &level, LTERM);
}


/*************************************************************************/
/*! This function computes the single-source shortest path lengths from the
    root node to all the other nodes in the graph. If the graph is not
    connected then, the shortest path to the vertices in the other
    components is -1.

    \param[IN] graph is the graph structure.
    \param[IN] v is the root of the single-source shortest path computations.
    \param[OUT] sps[i] stores the length of the shortest path from v to
           vertex i. If no such path exists, then it is -1. Note that the
           returned array will be either an array of int32_t or an array of
           floats. The specific type is determined by the existance of non
           NULL iadjwgt and fadjwgt arrays. If both of these arrays exist,
           then priority is given to iadjwgt.

    \note The returned array should be freed with gk_free().
*/
/*************************************************************************/
void gk_graph_SingleSourceShortestPaths(gk_graph_t *graph, int v, void **r_sps)
{
  ssize_t *xadj;
  int i, u, nvtxs;
  int32_t *adjncy, *inqueue;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* inqueue[u]: 0 = not seen, 1 = in the queue, 2 = finalized */
  inqueue = gk_i32smalloc(nvtxs, 0, "gk_graph_SingleSourceShortestPaths: inqueue");

  /* determine if you will be computing using int32_t or float and
     proceed from there. Both branches are the same Dijkstra-style
     relaxation; keys are negated because the priority queue extracts
     the maximum, so -sps gives min-distance-first order. */
  if (graph->iadjwgt != NULL) {
    gk_i32pq_t *queue;
    int32_t *adjwgt;
    int32_t *sps;

    adjwgt = graph->iadjwgt;

    queue = gk_i32pqCreate(nvtxs);
    gk_i32pqInsert(queue, v, 0);
    inqueue[v] = 1;

    /* -1 marks "unreached" */
    sps = gk_i32smalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps");
    sps[v] = 0;

    /* start processing the nodes */
    while ((v = gk_i32pqGetTop(queue)) != -1) {
      inqueue[v] = 2;

      /* relax the adjacent edges */
      for (i=xadj[v]; i<xadj[v+1]; i++) {
        u = adjncy[i];
        if (inqueue[u] == 2)
          continue;

        if (sps[u] < 0 || sps[v]+adjwgt[i] < sps[u]) {
          sps[u] = sps[v]+adjwgt[i];

          if (inqueue[u])
            gk_i32pqUpdate(queue, u, -sps[u]);
          else {
            gk_i32pqInsert(queue, u, -sps[u]);
            inqueue[u] = 1;
          }
        }
      }
    }

    *r_sps = (void *)sps;

    gk_i32pqDestroy(queue);
  }
  else {
    gk_fpq_t *queue;
    float *adjwgt;
    float *sps;

    /* NOTE(review): if fadjwgt is also NULL this dereferences a NULL
       adjwgt during relaxation — callers appear expected to provide at
       least one weight array; confirm. */
    adjwgt = graph->fadjwgt;

    queue = gk_fpqCreate(nvtxs);
    gk_fpqInsert(queue, v, 0);
    inqueue[v] = 1;

    sps = gk_fsmalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps");
    sps[v] = 0;

    /* start processing the nodes */
    while ((v = gk_fpqGetTop(queue)) != -1) {
      inqueue[v] = 2;

      /* relax the adjacent edges */
      for (i=xadj[v]; i<xadj[v+1]; i++) {
        u = adjncy[i];
        if (inqueue[u] == 2)
          continue;

        if (sps[u] < 0 || sps[v]+adjwgt[i] < sps[u]) {
          sps[u] = sps[v]+adjwgt[i];

          if (inqueue[u])
            gk_fpqUpdate(queue, u, -sps[u]);
          else {
            gk_fpqInsert(queue, u, -sps[u]);
            inqueue[u] = 1;
          }
        }
      }
    }

    *r_sps = (void *)sps;

    gk_fpqDestroy(queue);
  }

  gk_free((void **)&inqueue, LTERM);
}


/* Everything from here to the matching #endif is dead code: it is excluded
   from compilation (XXX is never defined) and still refers to CSR-matrix
   style fields (nrows/rowptr/...) and an undeclared 'what' parameter that
   gk_graph_t does not provide. Kept verbatim for reference. */
#ifdef XXX

/*************************************************************************/
/*! Sorts the adjacency lists in increasing vertex order
    \param graph the graph itself,
*/
/**************************************************************************/
void gk_graph_SortAdjacencies(gk_graph_t *graph)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  switch (what) {
    case GK_CSR_ROW:
      if (!graph->rowptr)
        gk_errexit(SIGERR, "Row-based view of the graphrix does not exists.\n");

      n   = graph->nrows;
      ptr = graph->rowptr;
      ind = graph->rowind;
      val = graph->rowval;
      break;

    case GK_CSR_COL:
      if (!graph->colptr)
        gk_errexit(SIGERR, "Column-based view of the graphrix does not exists.\n");

      n   = graph->ncols;
      ptr = graph->colptr;
      ind = graph->colind;
      val = graph->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    cand = gk_ikvmalloc(nn, "gk_graph_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_graph_SortIndices: tval");

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* k flags whether the list is out of order; sort only when needed */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]]     = val[j];
      }
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}


/*************************************************************************/
/*! Returns a subgraphrix containing a certain set of rows.

    \param graph is the original graphrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created subgraphrix.
*/
/**************************************************************************/
gk_graph_t *gk_graph_ExtractRows(gk_graph_t *graph, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  ngraph->nrows = nrows;
  ngraph->ncols = graph->ncols;

  /* first pass: count the non-zeros of the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += graph->rowptr[rind[i]+1]-graph->rowptr[rind[i]];

  ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr");
  ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind");
  ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval");

  /* second pass: copy each selected row in order */
  ngraph->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(graph->rowptr[i+1]-graph->rowptr[i],
        graph->rowind+graph->rowptr[i], ngraph->rowind+nnz);
    gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i],
        graph->rowval+graph->rowptr[i], ngraph->rowval+nnz);
    nnz += graph->rowptr[i+1]-graph->rowptr[i];
    ngraph->rowptr[++j] = nnz;
  }
  ASSERT(j == ngraph->nrows);

  return ngraph;
}


/*************************************************************************/
/*! Returns a subgraphrix corresponding to a specified partitioning of rows.

    \param graph is the original graphrix.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created subgraphrix.
*/
/**************************************************************************/
/* (dead code: inside the #ifdef XXX block above) */
gk_graph_t *gk_graph_ExtractPartition(gk_graph_t *graph, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  ngraph->nrows = 0;
  ngraph->ncols = graph->ncols;

  /* first pass: count the rows/non-zeros that belong to partition pid */
  for (nnz=0, i=0; i<graph->nrows; i++) {
    if (part[i] == pid) {
      ngraph->nrows++;
      nnz += graph->rowptr[i+1]-graph->rowptr[i];
    }
  }

  ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr");
  ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind");
  ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval");

  /* second pass: copy the selected rows, preserving their relative order */
  ngraph->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<graph->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(graph->rowptr[i+1]-graph->rowptr[i],
          graph->rowind+graph->rowptr[i], ngraph->rowind+nnz);
      gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i],
          graph->rowval+graph->rowptr[i], ngraph->rowval+nnz);
      nnz += graph->rowptr[i+1]-graph->rowptr[i];
      ngraph->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == ngraph->nrows);

  return ngraph;
}


/*************************************************************************/
/*! Splits the graphrix into multiple sub-graphrices based on the provided
    color array.

    \param graph is the original graphrix.
    \param color is an array of size equal to the number of non-zeros in
           the graphrix (row-wise structure). The graphrix is split into as
           many parts as the number of colors. For meaningfull results, the
           colors should be numbered consecutively starting from 0.
    \returns an array of graphrices for each supplied color number.
*/
/**************************************************************************/
gk_graph_t **gk_graph_Split(gk_graph_t *graph, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_graph_t **sgraphs;

  nrows  = graph->nrows;
  rowptr = graph->rowptr;
  rowind = graph->rowind;
  rowval = graph->rowval;

  /* colors are assumed consecutive from 0, so max+1 is the count */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  sgraphs = (gk_graph_t **)gk_malloc(sizeof(gk_graph_t *)*ncolors, "gk_graph_Split: sgraphs");
  for (i=0; i<ncolors; i++) {
    sgraphs[i] = gk_graph_Create();
    sgraphs[i]->nrows  = graph->nrows;
    sgraphs[i]->ncols  = graph->ncols;
    sgraphs[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_graph_Split: sgraphs[i]->rowptr");
  }

  /* histogram the non-zeros of each row by color */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      sgraphs[color[j]]->rowptr[i]++;
  }
  /* turn the per-color counts into CSR row pointers */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, sgraphs[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    sgraphs[i]->rowind = gk_imalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowind");
    sgraphs[i]->rowval = gk_fmalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowval");
  }

  /* scatter the non-zeros; rowptr doubles as the insertion cursor... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      sgraphs[color[j]]->rowind[sgraphs[color[j]]->rowptr[i]] = rowind[j];
      sgraphs[color[j]]->rowval[sgraphs[color[j]]->rowptr[i]] = rowval[j];
      sgraphs[color[j]]->rowptr[i]++;
    }
  }
  /* ...and SHIFTCSR restores the row pointers afterwards */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, sgraphs[i]->rowptr);

  return sgraphs;
}


/*************************************************************************/
/*! Prunes certain rows/columns of the graphrix. The prunning takes place
    by analyzing the row structure of the graphrix. The prunning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.
    \param graph the graphrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the graphrix will be prunned,
    \param minf is the minimum number of rows (columns) that a column (row) must
           be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row) must
           be present at in order to be kept.
    \returns the prunned graphrix consisting only of its row-based structure.
           The input graphrix is not modified.
*/
/**************************************************************************/
/* (dead code: inside the #ifdef XXX block above) */
gk_graph_t *gk_graph_Prune(gk_graph_t *graph, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  nrows = ngraph->nrows = graph->nrows;
  ncols = ngraph->ncols = graph->ncols;

  rowptr = graph->rowptr;
  rowind = graph->rowind;
  rowval = graph->rowval;

  /* output arrays are sized for the worst case (nothing pruned) */
  nrowptr = ngraph->rowptr = gk_zmalloc(nrows+1, "gk_graph_Prune: nrowptr");
  nrowind = ngraph->rowind = gk_imalloc(rowptr[nrows], "gk_graph_Prune: nrowind");
  nrowval = ngraph->rowval = gk_fmalloc(rowptr[nrows], "gk_graph_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count how many rows each column appears in */
      collen = gk_ismalloc(ncols, 0, "gk_graph_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* collen becomes a keep/drop flag per column */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the entries whose column survived */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep a row only when its length is within [minf, maxf];
         dropped rows become empty but keep their numbering */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_graph_Free(&ngraph);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return ngraph;
}


/*************************************************************************/
/*! Normalizes the rows/columns of the graphrix to be unit length.

    \param graph the graphrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
/* (dead code: inside the #ifdef XXX block above) */
void gk_graph_Normalize(gk_graph_t *graph, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  if (what&GK_CSR_ROW && graph->rowval) {
    n   = graph->nrows;
    ptr = graph->rowptr;
    val = graph->rowval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */
        }
        if (sum > 0) {
          /* sum is reused as the scaling factor */
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  if (what&GK_CSR_COL && graph->colval) {
    n   = graph->ncols;
    ptr = graph->colptr;
    val = graph->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}

#endif
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-domain ceiling/floor of n/d computed in double — helpers emitted
 * by CLooG for the tiled loop bounds below. */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* min is already defined above, so this guard is a no-op; kept for parity
 * with the generated source. */
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: normalizes by mutating *y in place — callers must not reuse *y
 * afterwards expecting its original value.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative.
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
/* PLUTO/CLooG-generated time-tiled schedule: t1 sweeps time-tile bands,
 * t2..t4 index the spatial tiles (z:16, y:16, x:128 per the tile_size
 * list above), t5 is the time step within a band, and t6..t8 are the
 * intra-tile z/y/x points.  Machine-generated — do not hand-edit the
 * loop bounds. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=floord(Nt-1,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
    ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
    /* tiles along t2 are independent for a fixed t1 — parallelize here */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(16*t3+Nx+3,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
          for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),32*t4+30);t5++) {
            for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                lbv=max(128*t4,4*t5+4);
                ubv=min(128*t4+127,4*t5+Nx-5);
                /* innermost x loop — unit stride, vectorizable */
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* leapfrog wave-equation update: A(t+1) = 2*A(t) - A(t-1)
                   * + roc2 * (order-8 25-point Laplacian of A(t)) */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  /* NOTE(review): the top-level spine A (double****) itself is never
   * freed — only its planes; harmless at process exit but a leak if this
   * were ever turned into a library call. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);

  return 0;
}
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* 36 alphanumeric characters + 1 "blank" slot for captcha labels. */
#define NUMCHARS 37

/* Serializes access to the shared RNG/path-selection state across the
 * data-loader threads. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Reads one path per line from 'filename' and returns them as a list.
 * Caller owns the returned list.  file_error() is expected not to return
 * if the file cannot be opened. */
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = random_gen()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

/* Picks n paths that advance sequentially in time along 'mini_batch'
 * independent timelines (used when training trackers on video frames).
 * Each timeline starts at a random frame and steps forward by a random
 * speed in [1, augment_speed].  Returns a caller-owned array of borrowed
 * path pointers. */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }

    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;

            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}

/* Picks n paths uniformly at random (with replacement) from paths[0..m-1];
 * retries on empty strings.  Returns a caller-owned array of borrowed
 * path pointers. */
char **get_random_paths(char **paths, int n, int m)
{
    char** random_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d \n", n);
    for(i = 0; i < n; ++i){
        do {
            int index = random_gen() % m;
            random_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf("grp: %s\n", paths[index]);
            if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]);
        } while (strlen(random_paths[i]) == 0);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

/* Returns a caller-owned array of n newly-allocated paths where every
 * occurrence of 'find' in paths[i] is replaced by 'replace'. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char** replace_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

/* Loads n images at w x h, converted to grayscale, one flattened image
 * per row of the returned matrix.  Row memory is owned by the matrix. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Loads n color images at w x h, one flattened image per matrix row. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Loads n images with random crop/flip/HSV augmentation applied, resized
 * to w x h.  'min'/'max' bound the augmentation crop size; dontuse_opencv
 * forces the stb loader. */
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        int size = w > h ? w : h;
        image im;
        if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3);
        else im = load_image_color(paths[i], 0, 0);

        image crop = random_augment_image(im, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        image sized = resize_image(crop, w, h);

        //show_image(im, "orig");
        //show_image(sized, "sized");
        //show_image(sized, paths[i]);
        //wait_until_press_key_cv();
        //printf("w = %d, h = %d \n", sized.w, sized.h);

        free_image(im);
        free_image(crop);

        X.vals[i] = sized.data;
        X.cols = sized.h*sized.w*sized.c;
    }
    return X;
}

extern int check_mistakes;

/* Parses "id x y w h" lines from a label file into a caller-owned array;
 * sets *n to the number of boxes.  A missing file is logged to bad.list
 * and yields *n == 0 (normal for some datasets, e.g. MSCOCO). */
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        FILE* fw = fopen("bad.list", "a");
        fwrite(filename, sizeof(char), strlen(filename), fw);
        char *new_line = "\n";
        fwrite(new_line, sizeof(char), strlen(new_line), fw);
        fclose(fw);
        if (check_mistakes) {
            printf("\n Error in read_boxes() \n");
            getchar();
        }
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label));
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

/* In-place Fisher-Yates-style shuffle of the box array. */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = random_gen()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

/* Maps box coordinates through the crop/scale (dx,dy,sx,sy) and optional
 * horizontal flip, clamping results to [0,1].  Boxes that end up fully
 * outside the crop are marked invalid with the 999999 sentinel. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

/* Fills a flat truth buffer for the "swag" detector layout: up to 30
 * boxes of (x,y,w,h) followed by a one-hot class vector each. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 30; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

/* Fills region-layer truth on a num_boxes x num_boxes grid: each cell
 * holds [objectness, one-hot class, x, y, w, h] with cell-relative x/y. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .001 || h < .001) continue;

        /* grid cell owning the box center; x/y become cell-relative */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;   /* cell already has an object */
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

/* Validates and writes up to num_boxes ground-truth boxes for the yolo
 * layer as [x,y,w,h,id] tuples.  Bad annotations are logged (and appended
 * to bad_label.list) and skipped.  Returns the smallest object side seen,
 * in network pixels (0 if no boxes). */
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;  // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        /* SECURITY NOTE(review): buff is fed to system() with labelpath
         * interpolated — a crafted label path could inject shell commands;
         * consider writing to bad_label.list directly instead. */
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            if (check_mistakes) getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;

        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}

/* Decodes and prints an n-character captcha prediction (argmax per slot). */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

/* Builds the one-hot captcha truth from the basename of 'path': one
 * NUMCHARS slot per character; unused slots get the blank class. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

/* Loads a captcha classification batch: images plus per-character one-hot
 * labels derived from the filenames. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Loads a captcha autoencoder batch: targets are the inputs themselves. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

/* One-hot classification truth: truth[i] = 1 iff labels[i] is a substring
 * of path.  Warns when the path matches zero or multiple labels. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

/* Like fill_truth() but with label smoothing: the match gets
 * 1 - eps and every other class gets eps / (k - 1). */
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
    int i;
    memset(truth, 0, k * sizeof(float));
    int count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = (1 - label_smooth_eps);
            ++count;
        }
        else {
            truth[i] = label_smooth_eps / (k - 1);
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

/* Propagates truth up the class hierarchy (mark all ancestors), then
 * fills entirely-unlabeled sibling groups with SECRET_NUM so that the
 * loss ignores them. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Builds the smoothed (and optionally hierarchy-expanded) label matrix
 * for n paths. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

/* Builds a multi-label tag matrix by reading integer tag ids from the
 * label file paired with each image path (falls back to labels2/). */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

/* Reads class names from 'filename' (one per line); optionally reports
 * the count via *size.  Caller owns the returned array. */
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    if(size) *size = plist->size;
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

/* Convenience wrapper around get_labels_custom() without a size out-param. */
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}

/* Releases a data batch; shallow batches only own the row-pointer arrays,
 * deep batches own the row contents too. */
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

/* Loads a region-detector training batch: n randomly-chosen images with
 * random crop/flip/HSV jitter and matching grid truth. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        /* random crop offsets (may be negative = padding) */
        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

/* Loads n image PAIRS side by side (6 channels) with per-class IoU truth
 * read from each image's label file, for the "compare" network. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        /* NOTE(review): fp1/fp2 are not checked for NULL before fscanf —
         * a missing label file would crash here; confirm labels always
         * exist for this dataset. */
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");

        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 &&  d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

/* Loads ONE random image at native resolution with crop/flip jitter and
 * swag-layout truth (see fill_truth_swag). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/* Appends old_truth's boxes after new_truth's existing boxes (mixup blend),
 * up to the 'boxes' capacity.  Truth tuples are [x,y,w,h,id]. */
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*t_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] = old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

/* Merges old_truth's boxes into new_truth for 4-image mosaic augmentation:
 * shifts each box into quadrant i_mixup (split at cut_x/cut_y, adjusted by
 * the per-edge shifts), clips to the canvas, and keeps only boxes that
 * remain fully inside with valid size. */
void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
    int left_shift, int right_shift, int top_shift, int bot_shift)
{
    const int t_size = 4 + 1;
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*t_size;
        new_truth_ptr[0] = 0;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];

        // shift 4 images
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }

        int left = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top = (yb - hb / 2)*h;
        int bot = (yb + hb / 2)*h;

        // fix out of bound
        if (left < 0) {
            float diff = (float)left / w;
            xb = xb - diff / 2;
            wb = wb + diff;
        }
        if (right > w) {
            float diff = (float)(right - w) / w;
            xb = xb - diff / 2;
            wb = wb - diff;
        }
        if (top < 0) {
            float diff = (float)top / h;
            yb = yb - diff / 2;
            hb = hb + diff;
        }
        if (bot > h) {
            float diff = (float)(bot - h) / h;
            yb = yb - diff / 2;
            hb = hb - diff;
        }

        left = (xb - wb / 2)*w;
        right = (xb + wb / 2)*w;
        top = (yb - hb / 2)*h;
        bot = (yb + hb / 2)*h;

        // leave only within the image
        if(left >= 0 && right <= w && top >= 0 && bot <= h &&
            wb > 0 && wb < 1 &&
            hb > 0 && hb < 1 &&
            xb > 0 && xb < 1 &&
            yb > 0 && yb < 1)
        {
            new_truth_ptr[0] = xb;
            new_truth_ptr[1] = yb;
            new_truth_ptr[2] = wb;
            new_truth_ptr[3] = hb;
            new_truth_ptr[4] = old_truth_ptr[4];
            new_t++;
        }
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

#ifdef OPENCV

#include "http_stream.h"

/* Main OpenCV-backed detection loader: picks n images (random or
 * sequential when tracking), applies crop/flip/HSV/blur/letterbox/mixup/
 * mosaic augmentation, and returns images plus yolo truth. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
    float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;

    assert(use_mixup != 2);
    if (use_mixup == 3 && letter_box) {
        printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n");
        exit(0);
    }
    if (random_gen() % 2 == 0) use_mixup = 0;
    int i;

    /* mosaic: random split point per output image */
    int *cut_x = NULL, *cut_y = NULL;
    if (use_mixup == 3) {
        cut_x = (int*)calloc(n, sizeof(int));
        cut_y = (int*)calloc(n, sizeof(int));
        const float min_offset = 0.2; // 20%
        for (i = 0; i < n; ++i) {
            cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset));
            cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset));
        }
    }

    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // recalculate augmentation for the 2nd sequence if(track==1)

        char **random_paths;
        if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else random_paths = get_random_paths(paths, n, m);

        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(5 * boxes, sizeof(float));
            const char *filename = random_paths[i];

            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                if (check_mistakes) {
                    printf("\n Error in load_data_detection() - OpenCV \n");
                    getchar();
                }
                continue;
            }

            int oh = get_height_mat(src);
            int ow = get_width_mat(src);

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            /* one augmentation draw per sequence when tracking, otherwise
             * fresh randomness for every image */
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;

                if (use_blur) {
                    int tmp_blur = rand_int(0, 2);  // 0 - disable, 1 - blur background, 2 - blur the whole image
                    if (tmp_blur == 0) blur = 0;
                    else if (tmp_blur == 1) blur = 1;
                    else blur = use_blur;
                }
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);

            //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8;   // disable blur if one of the objects is too small

            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, truth);

            if (use_mixup == 0) {
                d.X.vals[i] = ai.data;
                memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            }
            else if (use_mixup == 1) {
                if (i_mixup == 0) {
                    d.X.vals[i] = ai.data;
                    memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
                }
                else if (i_mixup == 1) {
                    image old_img = make_empty_image(w, h, c);
                    old_img.data = d.X.vals[i];
                    //show_image(ai, "new");
                    //show_image(old_img, "old");
                    //wait_until_press_key_cv();
                    blend_images_cv(ai, 0.5, old_img, 0.5);
                    blend_truth(d.y.vals[i], boxes, truth);
                    free_image(old_img);
                    d.X.vals[i] = ai.data;
                }
            }
            else if (use_mixup == 3) {
                if (i_mixup == 0) {
                    image tmp_img = make_image(w, h, c);
                    d.X.vals[i] = tmp_img.data;
                }

                if (flip) {
                    int tmp = pleft;
                    pleft = pright;
                    pright = tmp;
                }

                const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow)));
                const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh)));

                const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow)));
                const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh)));

                int k, x, y;
                for (k = 0; k < c; ++k) {
                    for (y = 0; y < h; ++y) {
                        int j = y*w + k*w*h;
                        if (i_mixup == 0 && y < cut_y[i]) {
                            int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 1 && y < cut_y[i]) {
                            int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float));
                        }
                        if (i_mixup == 2 && y >= cut_y[i]) {
                            int j_src = (w - cut_x[i]
- right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); assert(use_mixup < 2); int mixup = use_mixup ? random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5 * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); char *filename = (i_mixup) ? 
mixup_random_paths[i] : random_paths[i];
            // pass 0 reads from random_paths, pass 1 (mixup) from the second list
            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            // maximum crop displacement, as a fraction of the source size
            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // When tracking sequential frames, reuse one random augmentation
            // for the whole sequence; otherwise re-roll per image.
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            // Letterbox: pad the crop so the source aspect ratio maps onto the
            // network aspect ratio without distortion.
            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            // crop scale and offset, used to remap truth boxes below
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);

            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if (i_mixup) {
                // second pass: 50/50 blend with the image stored in pass 0,
                // then merge the two label sets
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));

            // Debug dump of augmented samples when -show_imgs is used.
            if (show_imgs)// && i_mixup)
            {
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }

            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif    // OPENCV

// Worker entry point for one loader thread: dispatch on the requested data
// type and fill *a.d (or a.im/a.resized for single-image types).
// Takes ownership of ptr (a heap-allocated load_args) and frees it.
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // 0 means "unset" for these augmentation factors; 1 is the identity value
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h,
a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);  // load_thread owns its argument struct
    return 0;
}

// Spawn one detached-style loader thread running load_thread().
// The load_args copy is heap-allocated here and freed by the thread.
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

// Fan out a load request across args.threads worker threads, each loading an
// even share of args.n samples, then join and concatenate the partial results
// into *args.d (deep copy; the per-thread buffers are freed shallowly).
// Takes ownership of ptr and frees it.
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        // split total as evenly as possible: difference of rounded prefixes
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        // rows were transferred into *out; free only the top-level arrays
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

// Asynchronously start a multi-threaded load (see load_threads); the caller
// joins the returned thread when the batch is needed.
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

// Load image->image training pairs: X from "*.png", y (grayscale targets)
// from the matching "*-label.png" files.
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

// Simple classification loader: images resized to w x h, labels matched from
// the path names against the k label strings. No augmentation.
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0, 0);
    if(m) free(paths);
    return d;
}

/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    data d = {0};
    d.indexes = calloc(n, sizeof(int));
    if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(paths, n, labels, k);
    if(m) free(paths);
    return d;
}
*/

// Super-resolution loader: y is a random (w*scale x h*scale) crop, X is that
// same crop downscaled to w x h; both may be flipped together.
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = (float**)xcalloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = random_gen()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}

// Augmented classification loader. Besides the geometric/color augmentation
// done in load_image_augment_paths, optionally applies (per batch, with 50%
// probability) MixUp (use_mixup==1), CutMix (2), Mosaic (3), or alternating
// CutMix/Mosaic (4), plus optional blur and debug image dumps.
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv)
{
    char **paths_stored = paths;
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps);

    if (use_mixup && rand_int(0, 1)) {
        // second, independently augmented batch to mix with
        char **paths_mix = get_random_paths(paths_stored, n, m);
        data d2 = { 0 };
        d2.shallow = 0;
        d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue,
saturation, exposure, dontuse_opencv);
        d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps);
        free(paths_mix);

        data d3 = { 0 };
        d3.shallow = 0;
        data d4 = { 0 };
        d4.shallow = 0;
        if (use_mixup >= 3) {
            // mosaic needs two more independently augmented batches
            char **paths_mix3 = get_random_paths(paths_stored, n, m);
            d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix3);

            char **paths_mix4 = get_random_paths(paths_stored, n, m);
            d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix4);
        }

        // mix
        int i, j;
        for (i = 0; i < d2.X.rows; ++i) {

            int mixup = use_mixup;
            if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic

            // MixUp -----------------------------------
            if (mixup == 1) {
                // mix images: plain 50/50 average of the two batches
                for (j = 0; j < d2.X.cols; ++j) {
                    d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f;
                }

                // mix labels
                for (j = 0; j < d2.y.cols; ++j) {
                    d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f;
                }
            }
            // CutMix -----------------------------------
            else if (mixup == 2) {
                // paste a random rectangle of image 2 over image 1 and weight
                // labels by the covered area fraction
                const float min = 0.3;  // 0.3*0.3 = 9%
                const float max = 0.8;  // 0.8*0.8 = 64%
                const int cut_w = rand_int(w*min, w*max);
                const int cut_h = rand_int(h*min, h*max);
                const int cut_x = rand_int(0, w - cut_w - 1);
                const int cut_y = rand_int(0, h - cut_h - 1);
                const int left = cut_x;
                const int right = cut_x + cut_w;
                const int top = cut_y;
                const int bot = cut_y + cut_h;

                assert(cut_x >= 0 && cut_x <= w);
                assert(cut_y >= 0 && cut_y <= h);
                assert(cut_w >= 0 && cut_w <= w);
                assert(cut_h >= 0 && cut_h <= h);

                assert(right >= 0 && right <= w);
                assert(bot >= 0 && bot <= h);

                assert(top <= bot);
                assert(left <= right);

                const float alpha = (float)(cut_w*cut_h) / (float)(w*h);
                const float beta = 1 - alpha;

                // paste the rectangle (3 channels, CHW layout)
                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = top; y < bot; ++y) {
                        for (x = left; x < right; ++x) {
                            int j = x + y*w + c*w*h;
                            d.X.vals[i][j] = d2.X.vals[i][j];
                        }
                    }
                }

                //printf("\n alpha = %f, beta = %f \n", alpha, beta);
                // mix labels
                for (j = 0; j < d.y.cols; ++j) {
                    d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha;
                }
            }
            // Mosaic -----------------------------------
            else if (mixup == 3) {
                // 4-image mosaic: quadrants split at a random crossing point,
                // labels weighted by each quadrant's area fraction
                const float min_offset = 0.2; // 20%
                const int cut_x = rand_int(w*min_offset, w*(1 - min_offset));
                const int cut_y = rand_int(h*min_offset, h*(1 - min_offset));

                float s1 = (float)(cut_x * cut_y) / (w*h);
                float s2 = (float)((w - cut_x) * cut_y) / (w*h);
                float s3 = (float)(cut_x * (h - cut_y)) / (w*h);
                float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h);

                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = 0; y < h; ++y) {
                        for (x = 0; x < w; ++x) {
                            int j = x + y*w + c*w*h;
                            if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j];
                            if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j];
                            if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j];
                            if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j];
                        }
                    }
                }

                for (j = 0; j < d.y.cols; ++j) {
                    const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4)));

                    d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s;
                }
            }
        }

        free_data(d2);

        if (use_mixup >= 3) {
            free_data(d3);
            free_data(d4);
        }
    }

#ifdef OPENCV
    // Optionally blur ~half of the images; ksize 17 when use_blur==1,
    // otherwise use_blur itself is the kernel size.
    if (use_blur) {
        int i;
        for (i = 0; i < d.X.rows; ++i) {
            if (random_gen() % 2) {
                image im = make_empty_image(w, h, 3);
                im.data = d.X.vals[i];

                int ksize = use_blur;
                if (use_blur == 1) ksize = 17;
                image blurred = blur_image(im, ksize);
                free_image(im);
                d.X.vals[i] = blurred.data;
                //if (i == 0) {
                //    show_image(im, "Not blurred");
                //    show_image(blurred, "blurred");
                //    wait_until_press_key_cv();
                //}
            }
        }
    }
#endif // OPENCV

    // Debug dump of augmented samples and their label vector.
    if (show_imgs) {
        int i, j;
        for (i = 0; i < d.X.rows; ++i) {
            image im = make_empty_image(w, h, 3);
            im.data = d.X.vals[i];
            char buff[1000];
            sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
            save_image(im, buff);

            char buff_string[1000];
            sprintf(buff_string, "\n Classes: ");
            for (j = 0; j < d.y.cols; ++j) {
                if (d.y.vals[i][j] > 0) {
                    char buff_tmp[100];
                    sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
                    strcat(buff_string, buff_tmp);
                }
            }
            printf("%s \n", buff_string);

            if (show_imgs == 1) {
                show_image(im, buff);
                wait_until_press_key_cv();
            }
        }
        printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
    }

    if (m) free(paths);

    return d;
}

// Multi-label (tag) loader: augmented images plus a k-wide tag vector
// derived from the path names.
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

// Stack m1's rows above m2's rows. The result shares the row pointers with
// the inputs (shallow); column counts are assumed equal.
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

// Concatenate two datasets; the result is shallow (shares row data).
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    return d;
}

// Fold n datasets into one by repeated pairwise concatenation; intermediate
// shallow results are freed, final result shares rows with the inputs.
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
        out = newdata;
    }
    return out;
}

// Load a CSV, pop the `target` column as class ids, and one-hot encode it
// into a k-wide label matrix.
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }