source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unop__exp2_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__exp2_fc32_fc32)
// op(A') function: GB (_unop_tran__exp2_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_cexp2f (aij)

// C and A entry types (single-precision complex)
#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = 2^x (complex base-2 exponential)
#define GB_OP(z, x) \
    z = GB_cexp2f (x) ;

// casting (no-op here: A and C have the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_cexp2f (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = 2^aij to all anz entries, in parallel.  For bitmap matrices,
// Ab selects which entries are present; absent entries are skipped.
GrB_Info GB (_unop_apply__exp2_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The whole method body is the shared transpose template, specialized via
// the macros above (GB_CAST_OP in particular).
GrB_Info GB (_unop_tran__exp2_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bshift_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint8)
// C=scalar+B                       GB (_bind1st__bshift_uint8)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint8)
// C=A+scalar                       GB (_bind2nd__bshift_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bit shift of a uint8 by a signed shift count
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint8 (x, y) ;

// true if the binop must be flipped (bshift is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// bshift is none of these, so this kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the block above always returns.  Harmless
    // artifact of the generator — do not hand-edit this generated file.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// compiled out: colscale is not generated for the bshift operator
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// compiled out: rowscale is not generated for the bshift operator
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__bshift_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__bshift_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Example_declare_target.3.c
/*
 * @@name:	declare_target.3c
 * @@type:	C
 * @@compilable: yes
 * @@linkable:	no
 * @@expect:	success
 * @@version:	omp_4.0
 */
#define N 1000

/* Device-resident globals: mapped for the whole program via declare target. */
#pragma omp declare target
float p[N], v1[N], v2[N];
#pragma omp end declare target

extern void init(float *, float *, int);
extern void output(float *, int);

/* Compute p = v1 * v2 element-wise on the target device, then copy the
   product back to the host and print it. */
void vec_mult()
{
   int idx;

   init(v1, v2, N);

   /* push freshly initialized inputs to the device copies */
   #pragma omp target update to(v1, v2)

   #pragma omp target
   #pragma omp parallel for
   for (idx = 0; idx < N; idx++)
      p[idx] = v1[idx] * v2[idx];

   /* pull the result back from the device copy of p */
   #pragma omp target update from(p)
   output(p, N);
}
mergesort.c
#include <stdio.h>
#include <stdlib.h>

/* Interactive parallel merge sort using OpenMP tasks.
   Reads up to MAX_N integers, sorts them with a task-parallel merge sort,
   and prints the sorted vector.  (User prompts are in Spanish, as in the
   original program.) */

#define MAX_N 20

int arr[MAX_N];                 // array to be sorted

/* Forward declarations: the original called these before any declaration,
   which relies on implicit declarations (invalid since C99). */
int merge_sort(int arr[], int low, int high);
int merge(int arr[], int l, int m, int h);

int main()
{
    int n, i;

    printf("Introduce el tamanyo del vector\n");
    /* Validate the size: the original accepted any n, overflowing the
       fixed MAX_N-element global array and invoking UB for n <= 0. */
    if (scanf("%d", &n) != 1 || n < 1 || n > MAX_N)
    {
        fprintf(stderr, "Tamanyo invalido (1..%d)\n", MAX_N);
        return EXIT_FAILURE;
    }

    printf("Introduce los elementos del vector\n");  // input the elements
    for (i = 0; i < n; i++)
    {
        if (scanf("%d", &arr[i]) != 1)
        {
            fprintf(stderr, "Entrada invalida\n");
            return EXIT_FAILURE;
        }
    }

    /* One thread spawns the root sort task; the team runs the nested tasks. */
    #pragma omp parallel
    #pragma omp single
    merge_sort(arr, 0, n - 1);

    printf("Vector ordenado: ");            // print sorted array
    for (i = 0; i < n - 1; i++)
        printf("%d, ", arr[i]);
    printf("%d\n", arr[n - 1]);
    return 0;
}

/* Recursively sort arr[low..high].  Each half is sorted in its own OpenMP
   task; taskwait guarantees both halves are done before they are merged. */
int merge_sort(int arr[], int low, int high)
{
    int mid;
    if (low < high)
    {
        mid = low + (high - low) / 2;   // overflow-safe midpoint

        // Divide and Conquer
        #pragma omp task
        merge_sort(arr, low, mid);
        #pragma omp task
        merge_sort(arr, mid + 1, high);

        // Combine
        #pragma omp taskwait
        merge(arr, low, mid, high);
    }
    return 0;
}

/* Merge the two sorted runs arr[l..m] and arr[m+1..h] in place.
   The original used fixed 10-element temporaries plus a 9999 sentinel:
   that overflowed the buffers for runs longer than 9 elements (the array
   holds up to 20) and mis-sorted any input containing values >= 9999.
   This version sizes the temporaries exactly (C99 VLAs) and merges with
   explicit bounds checks instead of sentinels. */
int merge(int arr[], int l, int m, int h)
{
    int n1 = m - l + 1;         // length of the left run
    int n2 = h - m;             // length of the right run
    int left[n1], right[n2];    // exact-size temporaries (small: n <= MAX_N)
    int i, j, k;

    for (i = 0; i < n1; i++)
        left[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        right[j] = arr[m + 1 + j];

    i = 0;
    j = 0;
    for (k = l; k <= h; k++)
    {
        /* take from the left run while it holds the smaller (or only) head;
           <= keeps the merge stable */
        if (j >= n2 || (i < n1 && left[i] <= right[j]))
            arr[k] = left[i++];
        else
            arr[k] = right[j++];
    }
    return 0;
}
c-parser.c
/* Parser for C and Objective-C. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. Parser actions based on the old Bison parser; structure somewhat influenced by and fragments based on the C++ parser. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* TODO: Make sure all relevant comments, and all relevant code from all actions, brought over from old parser. Verify exact correspondence of syntax accepted. Add testcases covering every input symbol in every state in old and new parsers. Include full syntax for GNU C, including erroneous cases accepted with error messages, in syntax productions in comments. Make more diagnostics in the front end generally take an explicit location rather than implicitly using input_location. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "langhooks.h" #include "input.h" #include "cpplib.h" #include "timevar.h" #include "c-pragma.h" #include "c-tree.h" #include "flags.h" #include "output.h" #include "toplev.h" #include "ggc.h" #include "c-common.h" #include "vec.h" #include "target.h" #include "cgraph.h" /* Miscellaneous data and functions needed for the parser. */ int yydebug; /* Objective-C specific parser/lexer information. 
*/ static int objc_pq_context = 0; /* The following flag is needed to contextualize Objective-C lexical analysis. In some cases (e.g., 'int NSObject;'), it is undesirable to bind an identifier to an Objective-C class, even if a class with that name exists. */ static int objc_need_raw_identifier = 0; #define OBJC_NEED_RAW_IDENTIFIER(VAL) \ do { \ if (c_dialect_objc ()) \ objc_need_raw_identifier = VAL; \ } while (0) /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 d) */ /* For checking property attribute keywords */ static int objc_property_attr_context; /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 d) */ /* APPLE LOCAL radar 3803157 - objc attribute (in 4.2 e) */ static tree objc_method_attributes; /* APPLE LOCAL begin C* language (in 4.2 f) */ /* For checking for 'foreach' context. */ static int objc_foreach_context; /* APPLE LOCAL end C* language (in 4.2 f) */ /* The reserved keyword table. */ struct resword { const char *word; ENUM_BITFIELD(rid) rid : 16; unsigned int disable : 16; }; /* Disable mask. Keywords are disabled if (reswords[i].disable & mask) is _true_. 
*/ #define D_C89 0x01 /* not in C89 */ #define D_EXT 0x02 /* GCC extension */ #define D_EXT89 0x04 /* GCC extension incorporated in C99 */ #define D_OBJC 0x08 /* Objective C only */ static const struct resword reswords[] = { { "_Bool", RID_BOOL, 0 }, { "_Complex", RID_COMPLEX, 0 }, { "_Decimal32", RID_DFLOAT32, D_EXT }, { "_Decimal64", RID_DFLOAT64, D_EXT }, { "_Decimal128", RID_DFLOAT128, D_EXT }, { "__FUNCTION__", RID_FUNCTION_NAME, 0 }, { "__PRETTY_FUNCTION__", RID_PRETTY_FUNCTION_NAME, 0 }, { "__alignof", RID_ALIGNOF, 0 }, { "__alignof__", RID_ALIGNOF, 0 }, { "__asm", RID_ASM, 0 }, { "__asm__", RID_ASM, 0 }, { "__attribute", RID_ATTRIBUTE, 0 }, { "__attribute__", RID_ATTRIBUTE, 0 }, { "__builtin_choose_expr", RID_CHOOSE_EXPR, 0 }, { "__builtin_offsetof", RID_OFFSETOF, 0 }, { "__builtin_types_compatible_p", RID_TYPES_COMPATIBLE_P, 0 }, { "__builtin_va_arg", RID_VA_ARG, 0 }, { "__complex", RID_COMPLEX, 0 }, { "__complex__", RID_COMPLEX, 0 }, { "__const", RID_CONST, 0 }, { "__const__", RID_CONST, 0 }, { "__extension__", RID_EXTENSION, 0 }, { "__func__", RID_C99_FUNCTION_NAME, 0 }, { "__imag", RID_IMAGPART, 0 }, { "__imag__", RID_IMAGPART, 0 }, { "__inline", RID_INLINE, 0 }, { "__inline__", RID_INLINE, 0 }, { "__label__", RID_LABEL, 0 }, { "__real", RID_REALPART, 0 }, { "__real__", RID_REALPART, 0 }, { "__restrict", RID_RESTRICT, 0 }, { "__restrict__", RID_RESTRICT, 0 }, { "__signed", RID_SIGNED, 0 }, { "__signed__", RID_SIGNED, 0 }, { "__thread", RID_THREAD, 0 }, { "__typeof", RID_TYPEOF, 0 }, { "__typeof__", RID_TYPEOF, 0 }, { "__volatile", RID_VOLATILE, 0 }, { "__volatile__", RID_VOLATILE, 0 }, { "asm", RID_ASM, D_EXT }, { "auto", RID_AUTO, 0 }, { "break", RID_BREAK, 0 }, { "case", RID_CASE, 0 }, { "char", RID_CHAR, 0 }, { "const", RID_CONST, 0 }, { "continue", RID_CONTINUE, 0 }, { "default", RID_DEFAULT, 0 }, { "do", RID_DO, 0 }, { "double", RID_DOUBLE, 0 }, { "else", RID_ELSE, 0 }, { "enum", RID_ENUM, 0 }, { "extern", RID_EXTERN, 0 }, { "float", RID_FLOAT, 0 
}, { "for", RID_FOR, 0 }, { "goto", RID_GOTO, 0 }, { "if", RID_IF, 0 }, { "inline", RID_INLINE, D_EXT89 }, { "int", RID_INT, 0 }, { "long", RID_LONG, 0 }, { "register", RID_REGISTER, 0 }, { "restrict", RID_RESTRICT, D_C89 }, { "return", RID_RETURN, 0 }, { "short", RID_SHORT, 0 }, { "signed", RID_SIGNED, 0 }, { "sizeof", RID_SIZEOF, 0 }, { "static", RID_STATIC, 0 }, { "struct", RID_STRUCT, 0 }, { "switch", RID_SWITCH, 0 }, { "typedef", RID_TYPEDEF, 0 }, { "typeof", RID_TYPEOF, D_EXT }, { "union", RID_UNION, 0 }, { "unsigned", RID_UNSIGNED, 0 }, { "void", RID_VOID, 0 }, { "volatile", RID_VOLATILE, 0 }, { "while", RID_WHILE, 0 }, /* These Objective-C keywords are recognized only immediately after an '@'. */ { "class", RID_AT_CLASS, D_OBJC }, { "compatibility_alias", RID_AT_ALIAS, D_OBJC }, { "defs", RID_AT_DEFS, D_OBJC }, { "encode", RID_AT_ENCODE, D_OBJC }, { "end", RID_AT_END, D_OBJC }, { "implementation", RID_AT_IMPLEMENTATION, D_OBJC }, { "interface", RID_AT_INTERFACE, D_OBJC }, /* APPLE LOCAL begin C* language (in 4.2 j) */ { "optional", RID_AT_OPTIONAL, D_OBJC }, { "required", RID_AT_REQUIRED, D_OBJC }, /* APPLE LOCAL end C* language (in 4.2 j) */ /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 k) */ { "property", RID_AT_PROPERTY, D_OBJC }, /* APPLE LOCAL radar 4564694 */ { "package", RID_AT_PACKAGE, D_OBJC }, { "private", RID_AT_PRIVATE, D_OBJC }, { "protected", RID_AT_PROTECTED, D_OBJC }, { "protocol", RID_AT_PROTOCOL, D_OBJC }, { "public", RID_AT_PUBLIC, D_OBJC }, { "selector", RID_AT_SELECTOR, D_OBJC }, { "throw", RID_AT_THROW, D_OBJC }, { "try", RID_AT_TRY, D_OBJC }, { "catch", RID_AT_CATCH, D_OBJC }, { "finally", RID_AT_FINALLY, D_OBJC }, { "synchronized", RID_AT_SYNCHRONIZED, D_OBJC }, /* These are recognized only in protocol-qualifier context (see above) */ { "bycopy", RID_BYCOPY, D_OBJC }, { "byref", RID_BYREF, D_OBJC }, { "in", RID_IN, D_OBJC }, { "inout", RID_INOUT, D_OBJC }, { "oneway", RID_ONEWAY, D_OBJC }, { "out", RID_OUT, D_OBJC }, /* APPLE 
LOCAL begin C* property (Radar 4436866) (in 4.2 l) */ /* These are recognized inside a property attribute list */ { "readonly", RID_READONLY, D_OBJC }, { "getter", RID_GETTER, D_OBJC }, { "setter", RID_SETTER, D_OBJC }, /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 l) */ /* APPLE LOCAL radar 4947014 - objc atomic property */ { "nonatomic", RID_NONATOMIC, D_OBJC }, }; #define N_reswords (sizeof reswords / sizeof (struct resword)) /* All OpenMP clauses. OpenMP 2.5. */ typedef enum pragma_omp_clause { PRAGMA_OMP_CLAUSE_NONE = 0, PRAGMA_OMP_CLAUSE_COPYIN, PRAGMA_OMP_CLAUSE_COPYPRIVATE, PRAGMA_OMP_CLAUSE_DEFAULT, PRAGMA_OMP_CLAUSE_FIRSTPRIVATE, PRAGMA_OMP_CLAUSE_IF, PRAGMA_OMP_CLAUSE_LASTPRIVATE, PRAGMA_OMP_CLAUSE_NOWAIT, PRAGMA_OMP_CLAUSE_NUM_THREADS, PRAGMA_OMP_CLAUSE_ORDERED, PRAGMA_OMP_CLAUSE_PRIVATE, PRAGMA_OMP_CLAUSE_REDUCTION, PRAGMA_OMP_CLAUSE_SCHEDULE, PRAGMA_OMP_CLAUSE_SHARED } pragma_omp_clause; /* Initialization routine for this file. */ void c_parse_init (void) { /* The only initialization required is of the reserved word identifiers. */ unsigned int i; tree id; int mask = (flag_isoc99 ? 0 : D_C89) | (flag_no_asm ? (flag_isoc99 ? D_EXT : D_EXT|D_EXT89) : 0); if (!c_dialect_objc ()) mask |= D_OBJC; ridpointers = GGC_CNEWVEC (tree, (int) RID_MAX); for (i = 0; i < N_reswords; i++) { /* If a keyword is disabled, do not enter it into the table and so create a canonical spelling that isn't a keyword. */ if (reswords[i].disable & mask) continue; id = get_identifier (reswords[i].word); C_RID_CODE (id) = reswords[i].rid; C_IS_RESERVED_WORD (id) = 1; ridpointers [(int) reswords[i].rid] = id; } } /* The C lexer intermediates between the lexer in cpplib and c-lex.c and the C parser. Unlike the C++ lexer, the parser structure stores the lexer information instead of using a separate structure. Identifiers are separated into ordinary identifiers, type names, keywords and some other Objective-C types of identifiers, and some look-ahead is maintained. ??? 
It might be a good idea to lex the whole file up front (as for C++). It would then be possible to share more of the C and C++ lexer code, if desired. */ /* The following local token type is used. */ /* A keyword. */ #define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1)) /* More information about the type of a CPP_NAME token. */ typedef enum c_id_kind { /* An ordinary identifier. */ C_ID_ID, /* An identifier declared as a typedef name. */ C_ID_TYPENAME, /* An identifier declared as an Objective-C class name. */ C_ID_CLASSNAME, /* Not an identifier. */ C_ID_NONE } c_id_kind; /* A single C token after string literal concatenation and conversion of preprocessing tokens to tokens. */ typedef struct c_token GTY (()) { /* The kind of token. */ ENUM_BITFIELD (cpp_ttype) type : 8; /* If this token is a CPP_NAME, this value indicates whether also declared as some kind of type. Otherwise, it is C_ID_NONE. */ ENUM_BITFIELD (c_id_kind) id_kind : 8; /* If this token is a keyword, this value indicates which keyword. Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid) keyword : 8; /* If this token is a CPP_PRAGMA, this indicates the pragma that was seen. Otherwise it is PRAGMA_NONE. */ ENUM_BITFIELD (pragma_kind) pragma_kind : 7; /* True if this token is from a system header. */ BOOL_BITFIELD in_system_header : 1; /* The value associated with this token, if any. */ tree value; /* The location at which this token was found. */ location_t location; } c_token; /* A parser structure recording information about the state and context of parsing. Includes lexer information with up to two tokens of look-ahead; more are not needed for C. */ typedef struct c_parser GTY(()) { /* The look-ahead tokens. */ c_token tokens[2]; /* How many look-ahead tokens are available (0, 1 or 2). */ short tokens_avail; /* True if a syntax error is being recovered from; false otherwise. c_parser_error sets this flag. It should clear this flag when enough tokens have been consumed to recover from the error. 
*/ BOOL_BITFIELD error : 1; /* True if we're processing a pragma, and shouldn't automatically consume CPP_PRAGMA_EOL. */ BOOL_BITFIELD in_pragma : 1; } c_parser; /* The actual parser and external interface. ??? Does this need to be garbage-collected? */ static GTY (()) c_parser *the_parser; /* APPLE LOCAL C* language (in 4.2 ae) */ static c_token * c_parser_peek_2nd_token (c_parser *); /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token (c_token *token, c_parser *parser) { timevar_push (TV_LEX); token->type = c_lex_with_flags (&token->value, &token->location, NULL); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; token->in_system_header = in_system_header; switch (token->type) { case CPP_NAME: { tree decl; int objc_force_identifier = objc_need_raw_identifier; OBJC_NEED_RAW_IDENTIFIER (0); if (C_IS_RESERVED_WORD (token->value)) { enum rid rid_code = C_RID_CODE (token->value); if (c_dialect_objc ()) { if (!OBJC_IS_AT_KEYWORD (rid_code) && (!OBJC_IS_PQ_KEYWORD (rid_code) || objc_pq_context)) { /* Return the canonical spelling for this keyword. */ token->value = ridpointers[(int) rid_code]; token->type = CPP_KEYWORD; token->keyword = rid_code; break; } /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */ else if (objc_foreach_context && rid_code == RID_IN) { /* This is dangerous, we assume we don't need 3 input tokens look ahead. */ c_token *tk = c_parser_peek_2nd_token (parser); if (tk->type == CPP_NAME || tk->type == CPP_OPEN_PAREN || tk->type == CPP_MULT || tk->type == CPP_PLUS || tk->type == CPP_PLUS_PLUS || tk->type == CPP_MINUS || tk->type == CPP_MINUS_MINUS /* APPLE LOCAL radar 4529200 (in 4.2 af) */ || tk->type == CPP_OPEN_SQUARE) { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */ } else { /* Return the canonical spelling for this keyword. 
*/ token->value = ridpointers[(int) rid_code]; token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name (token->value); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. */ if (objc_interface_decl && (global_bindings_p () || (!objc_force_identifier && !decl))) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; token->keyword = C_RID_CODE (token->value); break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* These tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ OBJC_NEED_RAW_IDENTIFIER (0); break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = TREE_INT_CST_LOW (token->value); token->value = NULL; break; default: break; } timevar_pop (TV_LEX); } /* Return a pointer to the next token from PARSER, reading it in if necessary. */ static inline c_token * c_parser_peek_token (c_parser *parser) { if (parser->tokens_avail == 0) { /* APPLE LOCAL begin switch these two */ parser->tokens_avail = 1; /* APPLE LOCAL C* language (in 4.2 ae) */ c_lex_one_token (&parser->tokens[0], parser); /* APPLE LOCAL end switch these two */ } return &parser->tokens[0]; } /* Return true if the next token from PARSER has the indicated TYPE. */ static inline bool c_parser_next_token_is (c_parser *parser, enum cpp_ttype type) { return c_parser_peek_token (parser)->type == type; } /* Return true if the next token from PARSER does not have the indicated TYPE. 
 */
static inline bool
c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type)
{
  return !c_parser_next_token_is (parser, type);
}

/* Return true if the next token from PARSER is the indicated
   KEYWORD.  */
static inline bool
c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword)
{
  c_token *token;

  /* Peek at the next token.  */
  token = c_parser_peek_token (parser);

  /* Check to see if it is the indicated keyword.  */
  return token->keyword == keyword;
}

/* Return true if TOKEN can start a type name, false otherwise:
   a typedef/class name, a type-specifier or qualifier keyword, or
   (ObjC only) '<' opening a protocol reference list.  */
static bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      switch (token->id_kind)
        {
        case C_ID_ID:
          return false;
        case C_ID_TYPENAME:
          return true;
        case C_ID_CLASSNAME:
          gcc_assert (c_dialect_objc ());
          return true;
        default:
          gcc_unreachable ();
        }
    case CPP_KEYWORD:
      switch (token->keyword)
        {
        case RID_UNSIGNED:
        case RID_LONG:
        case RID_SHORT:
        case RID_SIGNED:
        case RID_COMPLEX:
        case RID_INT:
        case RID_CHAR:
        case RID_FLOAT:
        case RID_DOUBLE:
        case RID_VOID:
        case RID_DFLOAT32:
        case RID_DFLOAT64:
        case RID_DFLOAT128:
        case RID_BOOL:
        case RID_ENUM:
        case RID_STRUCT:
        case RID_UNION:
        case RID_TYPEOF:
        case RID_CONST:
        case RID_VOLATILE:
        case RID_RESTRICT:
        case RID_ATTRIBUTE:
          return true;
        default:
          return false;
        }
    case CPP_LESS:
      /* ObjC: '<Proto>' protocol qualification can begin a type name.  */
      if (c_dialect_objc ())
        return true;
      return false;
    default:
      return false;
    }
}

/* Return true if the next token from PARSER can start a type name,
   false otherwise.  */
static inline bool
c_parser_next_token_starts_typename (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  return c_token_starts_typename (token);
}

/* Return true if TOKEN can start declaration specifiers, false
   otherwise.
 */
static bool
c_token_starts_declspecs (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      switch (token->id_kind)
        {
        case C_ID_ID:
          return false;
        case C_ID_TYPENAME:
          return true;
        case C_ID_CLASSNAME:
          gcc_assert (c_dialect_objc ());
          return true;
        default:
          gcc_unreachable ();
        }
    case CPP_KEYWORD:
      /* Same keyword set as c_token_starts_typename, plus the
         storage-class and function specifiers.  */
      switch (token->keyword)
        {
        case RID_STATIC:
        case RID_EXTERN:
        case RID_REGISTER:
        case RID_TYPEDEF:
        case RID_INLINE:
        case RID_AUTO:
        case RID_THREAD:
        case RID_UNSIGNED:
        case RID_LONG:
        case RID_SHORT:
        case RID_SIGNED:
        case RID_COMPLEX:
        case RID_INT:
        case RID_CHAR:
        case RID_FLOAT:
        case RID_DOUBLE:
        case RID_VOID:
        case RID_DFLOAT32:
        case RID_DFLOAT64:
        case RID_DFLOAT128:
        case RID_BOOL:
        case RID_ENUM:
        case RID_STRUCT:
        case RID_UNION:
        case RID_TYPEOF:
        case RID_CONST:
        case RID_VOLATILE:
        case RID_RESTRICT:
        case RID_ATTRIBUTE:
          return true;
        default:
          return false;
        }
    case CPP_LESS:
      /* ObjC: '<Proto>' protocol qualification can begin declaration
         specifiers.  */
      if (c_dialect_objc ())
        return true;
      return false;
    default:
      return false;
    }
}

/* Return true if the next token from PARSER can start declaration
   specifiers, false otherwise.  */
static inline bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  /* APPLE LOCAL begin radar 5277239 */
  /* Yes, we can have CLASS.method to mean property-style dot-syntax
     notation to call a class method (equiv to [CLASS meth]).  A class
     name followed by '.' is therefore treated as an expression, not
     as declaration specifiers.  */
  return c_token_starts_declspecs (token)
         && (token->id_kind != C_ID_CLASSNAME
             || c_parser_peek_2nd_token (parser)->type != CPP_DOT);
  /* APPLE LOCAL end radar 5277239 */
}

/* Return a pointer to the next-but-one token from PARSER, reading it
   in if necessary.  The next token is already read in.
 */
static c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  if (parser->tokens_avail >= 2)
    return &parser->tokens[1];
  gcc_assert (parser->tokens_avail == 1);
  /* Never lex past end of file or end of a pragma line.  */
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
  /* APPLE LOCAL begin switch these two */
  /* tokens_avail is bumped before lexing; see c_parser_peek_token.  */
  parser->tokens_avail = 2;
  /* APPLE LOCAL C* language (in 4.2 ae) */
  c_lex_one_token (&parser->tokens[1], parser);
  /* APPLE LOCAL end switch these two */
  return &parser->tokens[1];
}

/* Consume the next token from PARSER.  The asserts enforce that EOF
   is never consumed, that CPP_PRAGMA_EOL is consumed only via
   c_parser_skip_to_pragma_eol, and that CPP_PRAGMA is consumed only
   via c_parser_consume_pragma (unless recovering from an error).  */
static void
c_parser_consume_token (c_parser *parser)
{
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA);
  /* Shift the lookahead buffer down by one.  */
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
}

/* Expect the current token to be a #pragma.  Consume it and remember
   that we've begun parsing a pragma.  While in_pragma is set, token
   consumption must stop at CPP_PRAGMA_EOL.  */
static void
c_parser_consume_pragma (c_parser *parser)
{
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
  parser->in_pragma = true;
}

/* Update the globals input_location and in_system_header from
   TOKEN.  No-op at EOF so diagnostics keep the last real location.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
      in_system_header = token->in_system_header;
    }
}

/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".

   Do not issue a diagnostic if still recovering from an error.

   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.
 */
static void
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *token = c_parser_peek_token (parser);
  if (parser->error)
    return;
  parser->error = true;
  if (!gmsgid)
    return;
  /* This diagnostic makes more sense if it is tagged to the line of
     the token we just peeked at.  */
  c_parser_set_source_position_from_token (token);
  c_parse_error (gmsgid,
                 /* Because c_parse_error does not understand
                    CPP_KEYWORD, keywords are treated like
                    identifiers.  */
                 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
                 token->value);
}

/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.  */
static bool
c_parser_require (c_parser *parser,
                  enum cpp_ttype type,
                  const char *msgid)
{
  if (c_parser_next_token_is (parser, type))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      c_parser_error (parser, msgid);
      return false;
    }
}

/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */
static bool
c_parser_require_keyword (c_parser *parser,
                          enum rid keyword,
                          const char *msgid)
{
  if (c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      c_parser_error (parser, msgid);
      return false;
    }
}

/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this time.
   The target token is only matched at nesting depth zero; brackets of
   all three kinds are tracked, and an unmatched closer also stops the
   skip.  Clears parser->error on exit.  */
static void
c_parser_skip_until_found (c_parser *parser,
                           enum cpp_ttype type,
                           const char *msgid)
{
  unsigned nesting_depth = 0;

  if (c_parser_require (parser, type, msgid))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);
      /* If we've reached the token we want, consume it and stop.  */
      if (token->type == type && !nesting_depth)
        {
          c_parser_consume_token (parser);
          break;
        }
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
        return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
        return;
      if (token->type == CPP_OPEN_BRACE
          || token->type == CPP_OPEN_PAREN
          || token->type == CPP_OPEN_SQUARE)
        ++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
               || token->type == CPP_CLOSE_PAREN
               || token->type == CPP_CLOSE_SQUARE)
        {
          /* Stop (without consuming) at a closer that would take the
             depth negative, i.e. one belonging to an enclosing
             construct.  */
          if (nesting_depth-- == 0)
            break;
        }
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}

/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.  Same nesting
   rules as c_parser_skip_until_found; clears parser->error.  */
static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
          && !nesting_depth)
        break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
        return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
        return;
      if (token->type == CPP_OPEN_BRACE
          || token->type == CPP_OPEN_PAREN
          || token->type == CPP_OPEN_SQUARE)
        ++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
               || token->type == CPP_CLOSE_PAREN
               || token->type == CPP_CLOSE_SQUARE)
        {
          if (nesting_depth-- == 0)
            break;
        }
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}

/* Expect to be at the end of the pragma directive and consume an end
   of line marker.
 */
static void
c_parser_skip_to_pragma_eol (c_parser *parser)
{
  gcc_assert (parser->in_pragma);
  /* Leave pragma mode first so CPP_PRAGMA_EOL may be consumed.  */
  parser->in_pragma = false;

  if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line"))
    while (true)
      {
        c_token *token = c_parser_peek_token (parser);
        if (token->type == CPP_EOF)
          break;
        if (token->type == CPP_PRAGMA_EOL)
          {
            c_parser_consume_token (parser);
            break;
          }
        c_parser_consume_token (parser);
      }

  parser->error = false;
}

/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  Used for error recovery; clears
   parser->error on normal completion.  */
static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  bool save_error = parser->error;

  while (true)
    {
      c_token *token;

      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);

      switch (token->type)
        {
        case CPP_EOF:
          return;

        case CPP_PRAGMA_EOL:
          if (parser->in_pragma)
            return;
          break;

        case CPP_SEMICOLON:
          /* If the next token is a ';', we have reached the
             end of the statement.  */
          if (!nesting_depth)
            {
              /* Consume the ';'.  */
              c_parser_consume_token (parser);
              goto finished;
            }
          break;

        case CPP_CLOSE_BRACE:
          /* If the next token is a non-nested '}', then we have
             reached the end of the current block.  */
          if (nesting_depth == 0 || --nesting_depth == 0)
            {
              c_parser_consume_token (parser);
              goto finished;
            }
          break;

        case CPP_OPEN_BRACE:
          /* If it the next token is a '{', then we are entering a new
             block.  Consume the entire block.  */
          ++nesting_depth;
          break;

        case CPP_PRAGMA:
          /* If we see a pragma, consume the whole thing at once.  We
             have some safeguards against consuming pragmas willy-nilly.
             Normally, we'd expect to be here with parser->error set,
             which disables these safeguards.  But it's possible to get
             here for secondary error recovery, after parser->error has
             been cleared.  */
          c_parser_consume_pragma (parser);
          c_parser_skip_to_pragma_eol (parser);
          /* skip_to_pragma_eol cleared parser->error; restore it so
             the pragma safeguards stay disabled while we keep
             skipping.  */
          parser->error = save_error;
          continue;

        default:
          break;
        }

      c_parser_consume_token (parser);
    }

 finished:
  parser->error = false;
}

/* Save the warning flags which are controlled by __extension__.
   Returns them packed into an int (one bit each) and clears them.  */
static inline int
disable_extension_diagnostics (void)
{
  int ret = (pedantic
             | (warn_pointer_arith << 1)
             | (warn_traditional << 2)
             | (flag_iso << 3));
  pedantic = 0;
  warn_pointer_arith = 0;
  warn_traditional = 0;
  flag_iso = 0;
  return ret;
}

/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics.  */
static inline void
restore_extension_diagnostics (int flags)
{
  pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
}

/* Possibly kinds of declarator to parse.  */
typedef enum c_dtr_syn {
  /* A normal declarator with an identifier.  */
  C_DTR_NORMAL,
  /* An abstract declarator (maybe empty).  */
  C_DTR_ABSTRACT,
  /* APPLE LOCAL begin blocks 6339747 */
  /* A block declarator (maybe empty).  */
  C_DTR_BLOCK,
  /* APPLE LOCAL end blocks 6339747 */
  /* A parameter declarator: may be either, but after a type name does
     not redeclare a typedef name as an identifier if it can
     alternatively be interpreted as a typedef name; see DR#009,
     applied in C90 TC1, omitted from C99 and reapplied in C99 TC2
     following DR#249.  For example, given a typedef T, "int T" and
     "int *T" are valid parameter declarations redeclaring T, while
     "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are
     abstract declarators rather than involving redundant parentheses;
     the same applies with attributes inside the parentheses before
     "T".
*/ C_DTR_PARM } c_dtr_syn; static void c_parser_external_declaration (c_parser *); static void c_parser_asm_definition (c_parser *); /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */ static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool, tree*); static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool, bool); static struct c_typespec c_parser_enum_specifier (c_parser *); static struct c_typespec c_parser_struct_or_union_specifier (c_parser *); static tree c_parser_struct_declaration (c_parser *); static struct c_typespec c_parser_typeof_specifier (c_parser *); static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator_inner (c_parser *, bool, struct c_declarator *); static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree); static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree); static struct c_parm *c_parser_parameter_declaration (c_parser *, tree); static tree c_parser_simple_asm_expr (c_parser *); static tree c_parser_attributes (c_parser *); static struct c_type_name *c_parser_type_name (c_parser *); static struct c_expr c_parser_initializer (c_parser *); static struct c_expr c_parser_braced_init (c_parser *, tree, bool); static void c_parser_initelt (c_parser *); static void c_parser_initval (c_parser *, struct c_expr *); static tree c_parser_compound_statement (c_parser *); static void c_parser_compound_statement_nostart (c_parser *); static void c_parser_label (c_parser *); static void c_parser_statement (c_parser *); static void c_parser_statement_after_labels (c_parser *); static void c_parser_if_statement (c_parser *); static void c_parser_switch_statement (c_parser *); static void c_parser_while_statement (c_parser *); static void c_parser_do_statement (c_parser *); static void 
c_parser_for_statement (c_parser *); static tree c_parser_asm_statement (c_parser *); /* APPLE LOCAL begin radar 5732232 - blocks (C++ ca) */ static tree c_parser_block_literal_expr (c_parser *); /* APPLE LOCAL end radar 5732232 - blocks (C++ ca) */ static tree c_parser_asm_operands (c_parser *, bool); static tree c_parser_asm_clobbers (c_parser *); static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *); static struct c_expr c_parser_conditional_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_unary_expression (c_parser *); static struct c_expr c_parser_sizeof_expression (c_parser *); static struct c_expr c_parser_alignof_expression (c_parser *); static struct c_expr c_parser_postfix_expression (c_parser *); static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *, struct c_type_name *); static struct c_expr c_parser_postfix_expression_after_primary (c_parser *, struct c_expr); static struct c_expr c_parser_expression (c_parser *); static struct c_expr c_parser_expression_conv (c_parser *); static tree c_parser_expr_list (c_parser *, bool); static void c_parser_omp_construct (c_parser *); static void c_parser_omp_threadprivate (c_parser *); static void c_parser_omp_barrier (c_parser *); static void c_parser_omp_flush (c_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool c_parser_pragma (c_parser *, enum pragma_context); /* These Objective-C parser functions are only ever called when compiling Objective-C. */ /* APPLE LOCAL radar 4548636 - class attributes. 
 */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
/* APPLE LOCAL radar 4947311 - protocol attributes */
static void c_parser_objc_protocol_definition (c_parser *, tree);
static enum tree_code c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 b) */
static void c_parser_objc_interfacedecllist (c_parser *);
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 x) */
static void c_parser_objc_property_declaration (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);

/* Parse a translation unit (C90 6.7, C99 6.9).

   translation-unit:
     external-declarations

   external-declarations:
     external-declaration
     external-declarations external-declaration

   GNU extensions:

   translation-unit:
     empty

   Per-declaration parser scratch allocations on parser_obstack are
   released after each external declaration, and garbage collection is
   given a chance to run between declarations.  */
static void
c_parser_translation_unit (c_parser *parser)
{
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      if (pedantic)
        pedwarn ("ISO C forbids an empty source file");
    }
  else
    {
      void *obstack_position = obstack_alloc (&parser_obstack, 0);
      do
        {
          ggc_collect ();
          c_parser_external_declaration (parser);
          obstack_free (&parser_obstack, obstack_position);
        }
      while (c_parser_next_token_is_not (parser, CPP_EOF));
    }
}

/* Parse an external declaration (C90 6.7, C99 6.9).
   external-declaration:
     function-definition
     declaration

   GNU extensions:

   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration

   Objective-C:

   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end

   Dispatches on the first token; anything unrecognized falls through
   to c_parser_declaration_or_fndef.  */
static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
        {
        case RID_EXTENSION:
          /* __extension__ suppresses pedantic diagnostics for the
             declaration it prefixes (handled recursively).  */
          ext = disable_extension_diagnostics ();
          c_parser_consume_token (parser);
          c_parser_external_declaration (parser);
          restore_extension_diagnostics (ext);
          break;
        case RID_ASM:
          c_parser_asm_definition (parser);
          break;
        case RID_AT_INTERFACE:
        case RID_AT_IMPLEMENTATION:
          gcc_assert (c_dialect_objc ());
          /* APPLE LOCAL radar 4548636 - class attributes. */
          c_parser_objc_class_definition (parser, NULL_TREE);
          break;
        case RID_AT_CLASS:
          gcc_assert (c_dialect_objc ());
          c_parser_objc_class_declaration (parser);
          break;
        case RID_AT_ALIAS:
          gcc_assert (c_dialect_objc ());
          c_parser_objc_alias_declaration (parser);
          break;
        case RID_AT_PROTOCOL:
          gcc_assert (c_dialect_objc ());
          /* APPLE LOCAL begin radar 4947311 - protocol attributes */
          c_parser_objc_protocol_definition (parser, NULL_TREE);
          break;
          /* APPLE LOCAL end radar 4947311 - protocol attributes */
        /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 x) */
        case RID_AT_PROPERTY:
          c_parser_objc_property_declaration (parser);
          break;
        /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 x) */
        case RID_AT_END:
          gcc_assert (c_dialect_objc ());
          c_parser_consume_token (parser);
          objc_finish_implementation ();
          break;
        default:
          goto decl_or_fndef;
        }
      break;
    case CPP_SEMICOLON:
      if (pedantic)
        pedwarn ("ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_external);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      /* '+'/'-' at file scope begin an ObjC method definition.  */
      if (c_dialect_objc ())
        {
          c_parser_objc_method_definition (parser);
          break;
        }
      /* Else fall through, and yield a syntax error trying to parse
         as a declaration or function definition.  */
    default:
    decl_or_fndef:
      /* A declaration or a function definition.  We can only tell
         which after parsing the declaration specifiers, if any, and
         the first declarator.  */
      /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
      c_parser_declaration_or_fndef (parser, true, true, false, true, NULL);
      break;
    }
}

/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
   6.7, 6.9.1).  If FNDEF_OK is true, a function definition is
   accepted; otherwise (old-style parameter declarations) only other
   declarations are accepted.  If NESTED is true, we are inside a
   function or parsing old-style parameter declarations; any functions
   encountered are nested functions and declaration specifiers are
   required; otherwise we are at top level and functions are normal
   functions and declaration specifiers may be optional.  If EMPTY_OK
   is true, empty declarations are OK (subject to all other
   constraints); otherwise (old-style parameter declarations) they are
   diagnosed.  If START_ATTR_OK is true, the declaration specifiers
   may start with attributes; otherwise they may not.

   declaration:
     declaration-specifiers init-declarator-list[opt] ;

   function-definition:
     declaration-specifiers[opt] declarator declaration-list[opt]
       compound-statement

   declaration-list:
     declaration
     declaration-list declaration

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   init-declarator:
     declarator simple-asm-expr[opt] attributes[opt]
     declarator simple-asm-expr[opt] attributes[opt] = initializer

   GNU extensions:

   nested-function-definition:
     declaration-specifiers declarator declaration-list[opt]
       compound-statement

   The simple-asm-expr and attributes are GNU extensions.

   This function does not handle __extension__; that is handled in its
   callers.  ???
   Following the old parser, __extension__ may start external
   declarations, declarations in functions and declarations at the
   start of "for" loops, but not old-style parameter declarations.

   C99 requires declaration specifiers in a function definition; the
   absence is diagnosed through the diagnosis of implicit int.  In GNU
   C we also allow but diagnose declarations without declaration
   specifiers, but only at top level (elsewhere they conflict with
   other syntax).

   OpenMP:

   declaration:
     threadprivate-directive

   APPLE LOCAL: if FOREACH_ELEM is non-NULL we may be parsing the
   header of an ObjC foreach loop; on seeing the 'in' keyword the loop
   element decl is stored through FOREACH_ELEM and parsing stops with
   the initializer machinery primed (the caller finishes via
   finish_parse_foreach_header).  */
static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool empty_ok,
                               /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
                               bool nested, bool start_attr_ok, tree *foreach_elem)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;

  specs = build_null_declspecs ();
  c_parser_declspecs (parser, specs, true, true, start_attr_ok);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  finish_declspecs (specs);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Declaration with no declarators, e.g. "struct foo;".  */
      if (empty_ok)
        shadow_tag (specs);
      else
        {
          shadow_tag_warned (specs, 1);
          pedwarn ("empty declaration");
        }
      c_parser_consume_token (parser);
      return;
    }
  /* APPLE LOCAL begin radar 4548636 - class attributes. */
  else if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)
           || c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    {
      /* Attributes preceding @interface/@implementation: only
         attributes (no type or storage class) may appear.  */
      gcc_assert (c_dialect_objc ());
      if (!specs->declspecs_seen_p || specs->attrs == NULL_TREE
          || specs->type_seen_p || specs->non_sc_seen_p)
        c_parser_error (parser, "no type or storage class may be specified here");
      c_parser_objc_class_definition (parser, specs->attrs);
      return;
    }
  /* APPLE LOCAL end radar 4548636 - class attributes. */
  /* APPLE LOCAL begin radar 4947311 - protocol attributes */
  else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL))
    {
      gcc_assert (c_dialect_objc ());
      if (!specs->declspecs_seen_p || specs->attrs == NULL_TREE
          || specs->type_seen_p || specs->non_sc_seen_p)
        c_parser_error (parser, "no type or storage class may be specified here");
      c_parser_objc_protocol_definition (parser, specs->attrs);
      return;
    }
  /* APPLE LOCAL end radar 4947311 - protocol attributes */
  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  /* One iteration per init-declarator.  */
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      tree fnbody;
      /* Declaring either one or more declarators (in which case we
         should diagnose if there were no declaration specifiers) or a
         function definition (in which case the diagnostic for
         implicit int suffices).  */
      declarator = c_parser_declarator (parser, specs->type_seen_p,
                                        C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
        {
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      /* A token that can follow a declarator in a declaration (as
         opposed to a function definition) decides which we have.  */
      if (c_parser_next_token_is (parser, CPP_EQ)
          || c_parser_next_token_is (parser, CPP_COMMA)
          || c_parser_next_token_is (parser, CPP_SEMICOLON)
          || c_parser_next_token_is_keyword (parser, RID_ASM)
          /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
          || c_parser_next_token_is_keyword (parser, RID_IN)
          || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
        {
          tree asm_name = NULL_TREE;
          tree postfix_attrs = NULL_TREE;
          if (!diagnosed_no_specs && !specs->declspecs_seen_p)
            {
              diagnosed_no_specs = true;
              pedwarn ("data definition has no type or storage class");
            }
          /* Having seen a data definition, there cannot now be a
             function definition.  */
          fndef_ok = false;
          if (c_parser_next_token_is_keyword (parser, RID_ASM))
            asm_name = c_parser_simple_asm_expr (parser);
          if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
            postfix_attrs = c_parser_attributes (parser);
          /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
          if (c_parser_next_token_is_keyword (parser, RID_IN))
            {
              /* ObjC foreach header: "for (type elem in ..." — hand the
                 element decl back to the caller and stop here.  */
              gcc_assert (foreach_elem);
              *foreach_elem = start_decl (declarator, specs, true,
                                          chainon (postfix_attrs, all_prefix_attrs));
              if (!*foreach_elem)
                *foreach_elem = error_mark_node;
              start_init (*foreach_elem, asm_name, global_bindings_p ());
              return;
            }
          /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
          if (c_parser_next_token_is (parser, CPP_EQ))
            {
              tree d;
              struct c_expr init;
              c_parser_consume_token (parser);
              /* The declaration of the variable is in effect while
                 its initializer is parsed.  */
              d = start_decl (declarator, specs, true,
                              chainon (postfix_attrs, all_prefix_attrs));
              if (!d)
                d = error_mark_node;
              start_init (d, asm_name, global_bindings_p ());
              init = c_parser_initializer (parser);
              finish_init ();
              if (d != error_mark_node)
                {
                  maybe_warn_string_init (TREE_TYPE (d), init);
                  finish_decl (d, init.value, asm_name);
                }
            }
          else
            {
              tree d = start_decl (declarator, specs, false,
                                   chainon (postfix_attrs,
                                            all_prefix_attrs));
              if (d)
                finish_decl (d, NULL_TREE, asm_name);
            }
          if (c_parser_next_token_is (parser, CPP_COMMA))
            {
              c_parser_consume_token (parser);
              /* Attributes after ',' apply to all subsequent
                 declarators in this declaration.  */
              if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
                all_prefix_attrs = chainon (c_parser_attributes (parser),
                                            prefix_attrs);
              else
                all_prefix_attrs = prefix_attrs;
              continue;
            }
          else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            {
              c_parser_consume_token (parser);
              return;
            }
          else
            {
              c_parser_error (parser, "expected %<,%> or %<;%>");
              c_parser_skip_to_end_of_block_or_statement (parser);
              return;
            }
        }
      else if (!fndef_ok)
        {
          c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
                          "%<asm%> or %<__attribute__%>");
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      /* Function definition (nested or otherwise).  */
      if (nested)
        {
          /* APPLE LOCAL begin radar 5985368 */
          if (declarator->declarator
              && declarator->declarator->kind == cdk_block_pointer)
            error ("bad definition of a block");
          else if (pedantic)
          /* APPLE LOCAL end radar 5985368 */
            pedwarn ("ISO C forbids nested functions");
          /* APPLE LOCAL begin nested functions 4258406 4357979 (in 4.2 m) */
          else if (flag_nested_functions == 0)
            error ("nested functions are disabled, use -fnested-functions to re-enable");
          /* APPLE LOCAL end nested functions 4258406 4357979 (in 4.2 m) */
          push_function_context ();
        }
      if (!start_function (specs, declarator, all_prefix_attrs))
        {
          /* This can appear in many cases looking nothing like a
             function definition, so we don't give a more specific
             error suggesting there was one.  */
          c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
                          "or %<__attribute__%>");
          if (nested)
            pop_function_context ();
          break;
        }
      /* Parse old-style parameter declarations.  ??? Attributes are
         not allowed to start declaration specifiers here because of a
         syntax conflict between a function declaration with attribute
         suffix and a function definition with an attribute prefix on
         first old-style parameter declaration.  Following the old
         parser, they are not accepted on subsequent old-style
         parameter declarations either.  However, there is no
         ambiguity after the first declaration, nor indeed on the
         first as long as we don't allow postfix attributes after a
         declarator with a nonempty identifier list in a definition;
         and postfix attributes have never been accepted here in
         function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
             && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
        /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
        c_parser_declaration_or_fndef (parser, false, false, true, false, NULL);
      /* Point the decl at the '{', not at the declarator.  */
      DECL_SOURCE_LOCATION (current_function_decl)
        = c_parser_peek_token (parser)->location;
      store_parm_decls ();
      fnbody = c_parser_compound_statement (parser);
      if (nested)
        {
          tree decl = current_function_decl;
          add_stmt (fnbody);
          finish_function ();
          pop_function_context ();
          add_stmt (build_stmt (DECL_EXPR, decl));
        }
      else
        {
          add_stmt (fnbody);
          finish_function ();
        }
      break;
    }
}

/* APPLE LOCAL: Finish parsing the header of an ObjC foreach loop
   after c_parser_declaration_or_fndef stopped at 'in': consume the
   'in' keyword, parse the collection expression, and return a
   TREE_LIST pairing FOREACH_ELEM_SELECTOR with it.  flag_isoc99 is
   forced on around check_for_loop_decls so the loop-scope declaration
   is accepted.  */
static tree
finish_parse_foreach_header (c_parser *parser, tree foreach_elem_selector)
{
  tree res;
  int save_flag_isoc99 = flag_isoc99;
  gcc_assert (foreach_elem_selector);
  /* Consume 'in' keyword */
  c_parser_consume_token (parser);
  res = build_tree_list (foreach_elem_selector,
                         c_parser_initializer (parser).value);
  finish_init ();
  flag_isoc99 = 1;
  check_for_loop_decls ();
  flag_isoc99 = save_flag_isoc99;
  return res;
}
/* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */

/* Parse an asm-definition (asm() outside a function body).  This is a
   GNU extension.

   asm-definition:
     simple-asm-expr ;
*/
static void
c_parser_asm_definition (c_parser *parser)
{
  tree asm_str = c_parser_simple_asm_expr (parser);
  if (asm_str)
    cgraph_add_asm_node (asm_str);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}

/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
   6.7), adding them to SPECS (which may already include some).
   Storage class specifiers are accepted iff SCSPEC_OK; type
   specifiers are accepted iff TYPESPEC_OK; attributes are accepted at
   the start iff START_ATTR_OK.
   declaration-specifiers:
     storage-class-specifier declaration-specifiers[opt]
     type-specifier declaration-specifiers[opt]
     type-qualifier declaration-specifiers[opt]
     function-specifier declaration-specifiers[opt]

   Function specifiers (inline) are from C99, and are currently
   handled as storage class specifiers, as is __thread.

   C90 6.5.1, C99 6.7.1:
   storage-class-specifier:
     typedef
     extern
     static
     auto
     register

   C99 6.7.4:
   function-specifier:
     inline

   C90 6.5.2, C99 6.7.2:
   type-specifier:
     void
     char
     short
     int
     long
     float
     double
     signed
     unsigned
     _Bool
     _Complex
     [_Imaginary removed in C99 TC2]
     struct-or-union-specifier
     enum-specifier
     typedef-name

   (_Bool and _Complex are new in C99.)

   C90 6.5.3, C99 6.7.3:
   type-qualifier:
     const
     restrict
     volatile

   (restrict is new in C99.)

   GNU extensions:

   declaration-specifiers:
     attributes declaration-specifiers[opt]

   storage-class-specifier:
     __thread

   type-specifier:
     typeof-specifier
     _Decimal32
     _Decimal64
     _Decimal128

   Objective-C:

   type-specifier:
     class-name objc-protocol-refs[opt]
     typedef-name objc-protocol-refs
     objc-protocol-refs
*/
static void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
                    bool scspec_ok, bool typespec_ok, bool start_attr_ok)
{
  bool attrs_ok = start_attr_ok;
  bool seen_type = specs->type_seen_p;
  /* Loop, consuming one specifier per iteration, until a token that
     cannot begin (or continue) a specifier list is seen.  */
  while (c_parser_next_token_is (parser, CPP_NAME)
         || c_parser_next_token_is (parser, CPP_KEYWORD)
         || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      if (c_parser_next_token_is (parser, CPP_NAME))
        {
          tree value = c_parser_peek_token (parser)->value;
          c_id_kind kind = c_parser_peek_token (parser)->id_kind;
          /* This finishes the specifiers unless a type name is OK, it
             is declared as a type name and a type name hasn't yet
             been seen.  */
          if (!typespec_ok || seen_type
              || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME))
            break;
          c_parser_consume_token (parser);
          seen_type = true;
          attrs_ok = true;
          if (kind == C_ID_TYPENAME
              && (!c_dialect_objc ()
                  || c_parser_next_token_is_not (parser, CPP_LESS)))
            {
              t.kind = ctsk_typedef;
              /* For a typedef name, record the meaning, not the name.
                 In case of 'foo foo, bar;'.  */
              t.spec = lookup_name (value);
            }
          else
            {
              /* Objective-C class name, possibly qualified with
                 protocol references.  */
              tree proto = NULL_TREE;
              gcc_assert (c_dialect_objc ());
              t.kind = ctsk_objc;
              if (c_parser_next_token_is (parser, CPP_LESS))
                proto = c_parser_objc_protocol_refs (parser);
              t.spec = objc_get_protocol_qualified_type (value, proto);
            }
          declspecs_add_type (specs, t);
          continue;
        }
      if (c_parser_next_token_is (parser, CPP_LESS))
        {
          /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
             nisse@lysator.liu.se.  */
          tree proto;
          gcc_assert (c_dialect_objc ());
          if (!typespec_ok || seen_type)
            break;
          proto = c_parser_objc_protocol_refs (parser);
          t.kind = ctsk_objc;
          t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
          declspecs_add_type (specs, t);
          continue;
        }
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
        {
        case RID_STATIC:
        case RID_EXTERN:
        case RID_REGISTER:
        case RID_TYPEDEF:
        case RID_INLINE:
        case RID_AUTO:
        case RID_THREAD:
          if (!scspec_ok)
            goto out;
          attrs_ok = true;
          /* TODO: Distinguish between function specifiers (inline)
             and storage class specifiers, either here or in
             declspecs_add_scspec.  */
          declspecs_add_scspec (specs, c_parser_peek_token (parser)->value);
          c_parser_consume_token (parser);
          break;
        case RID_UNSIGNED:
        case RID_LONG:
        case RID_SHORT:
        case RID_SIGNED:
        case RID_COMPLEX:
        case RID_INT:
        case RID_CHAR:
        case RID_FLOAT:
        case RID_DOUBLE:
        case RID_VOID:
        case RID_DFLOAT32:
        case RID_DFLOAT64:
        case RID_DFLOAT128:
        case RID_BOOL:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          OBJC_NEED_RAW_IDENTIFIER (1);
          t.kind = ctsk_resword;
          t.spec = c_parser_peek_token (parser)->value;
          declspecs_add_type (specs, t);
          c_parser_consume_token (parser);
          break;
        case RID_ENUM:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_enum_specifier (parser);
          declspecs_add_type (specs, t);
          break;
        case RID_STRUCT:
        case RID_UNION:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_struct_or_union_specifier (parser);
          declspecs_add_type (specs, t);
          break;
        case RID_TYPEOF:
          /* ??? The old parser rejected typeof after other type
             specifiers, but is a syntax error the best way of
             handling this?  */
          if (!typespec_ok || seen_type)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_typeof_specifier (parser);
          declspecs_add_type (specs, t);
          break;
        case RID_CONST:
        case RID_VOLATILE:
        case RID_RESTRICT:
          attrs_ok = true;
          declspecs_add_qual (specs, c_parser_peek_token (parser)->value);
          c_parser_consume_token (parser);
          break;
        case RID_ATTRIBUTE:
          if (!attrs_ok)
            goto out;
          attrs = c_parser_attributes (parser);
          declspecs_add_attrs (specs, attrs);
          break;
        default:
          goto out;
        }
    }
 out: ;
}

/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2).

   enum-specifier:
     enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt]
     enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt]
     enum attributes[opt] identifier

   The form with trailing comma is new in C99.  The forms with
   attributes are GNU extensions.
   In GNU C, we accept any expression without commas in the syntax
   (assignment expressions, not just conditional expressions);
   assignment expressions will be diagnosed as non-constant.

   enumerator-list:
     enumerator
     enumerator-list , enumerator

   enumerator:
     enumeration-constant
     enumeration-constant = constant-expression
*/
static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition.  */
      tree type = start_enum (ident);
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
         forward order at the end.  */
      tree values = NULL_TREE;
      c_parser_consume_token (parser);
      while (true)
        {
          tree enum_id;
          tree enum_value;
          tree enum_decl;
          bool seen_comma;
          if (c_parser_next_token_is_not (parser, CPP_NAME))
            {
              /* Not an identifier: diagnose and skip to the closing
                 brace, marking the list as erroneous.  */
              c_parser_error (parser, "expected identifier");
              c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
              values = error_mark_node;
              break;
            }
          enum_id = c_parser_peek_token (parser)->value;
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_EQ))
            {
              c_parser_consume_token (parser);
              enum_value = c_parser_expr_no_commas (parser, NULL).value;
            }
          else
            enum_value = NULL_TREE;
          enum_decl = build_enumerator (enum_id, enum_value);
          TREE_CHAIN (enum_decl) = values;
          values = enum_decl;
          seen_comma = false;
          if (c_parser_next_token_is (parser, CPP_COMMA))
            {
              seen_comma = true;
              c_parser_consume_token (parser);
            }
          if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
            {
              /* A trailing comma before the closing brace is C99;
                 pedwarn about it in pedantic C90 mode.  */
              if (seen_comma && pedantic && !flag_isoc99)
                pedwarn ("comma at end of enumerator list");
              c_parser_consume_token (parser);
              break;
            }
          if (!seen_comma)
            {
              c_parser_error (parser, "expected %<,%> or %<}%>");
              c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
              values = error_mark_node;
              break;
            }
        }
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_enum (type, nreverse (values),
                              chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag reference.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  ret = parser_xref_tag (ENUMERAL_TYPE, ident);
  /* In ISO C, enumerated types can be referred to only if already
     defined.  */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    pedwarn ("ISO C forbids forward references to %<enum%> types");
  return ret;
}

/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1).

   struct-or-union-specifier:
     struct-or-union attributes[opt] identifier[opt]
       { struct-contents } attributes[opt]
     struct-or-union attributes[opt] identifier

   struct-contents:
     struct-declaration-list

   struct-declaration-list:
     struct-declaration ;
     struct-declaration-list struct-declaration ;

   GNU extensions:

   struct-contents:
     empty
     struct-declaration
     struct-declaration-list struct-declaration

   struct-declaration-list:
     struct-declaration-list ;
     ;

   (Note that in the syntax here, unlike that in ISO C, the semicolons
   are included here rather than in struct-declaration, in order to
   describe the syntax with extra semicolons and missing semicolon at
   end.)

   Objective-C:

   struct-declaration-list:
     @defs ( class-name )

   (Note this does not include a trailing semicolon, but can be
   followed by further declarations, and gets a pedwarn-if-pedantic
   when followed by a semicolon.)
*/
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  enum tree_code code;
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
         tag before parsing components.  */
      tree type = start_struct (code, ident);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
         forward order at the end.  Each struct-declaration may
         declare multiple components (comma-separated), so we must use
         chainon to join them, although when parsing each
         struct-declaration we can use TREE_CHAIN directly.

         The theory behind all this is that there will be more
         semicolon separated fields than comma separated fields, and
         so we'll be minimizing the number of node traversals required
         by chainon.  */
      tree contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
         e.g. foo(sizeof(struct{ @defs(ClassName) }));.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
        {
          tree name;
          gcc_assert (c_dialect_objc ());
          c_parser_consume_token (parser);
          if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
            goto end_at_defs;
          if (c_parser_next_token_is (parser, CPP_NAME)
              && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
            {
              name = c_parser_peek_token (parser)->value;
              c_parser_consume_token (parser);
            }
          else
            {
              c_parser_error (parser, "expected class name");
              c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
              goto end_at_defs;
            }
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          /* The class's instance variables become the contents of the
             struct.  */
          contents = nreverse (objc_get_class_ivars (name));
        }
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
         semicolons are diagnosed here; empty structures are diagnosed
         elsewhere.  */
      while (true)
        {
          tree decls;
          /* Parse any stray semicolon.  */
          if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            {
              if (pedantic)
                pedwarn ("extra semicolon in struct or union specified");
              c_parser_consume_token (parser);
              continue;
            }
          /* Stop if at the end of the struct or union contents.  */
          if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
            {
              c_parser_consume_token (parser);
              break;
            }
          /* Accept #pragmas at struct scope.  */
          if (c_parser_next_token_is (parser, CPP_PRAGMA))
            {
              c_parser_pragma (parser, pragma_external);
              continue;
            }
          /* Parse some comma-separated declarations, but not the
             trailing semicolon if any.  */
          decls = c_parser_struct_declaration (parser);
          contents = chainon (decls, contents);
          /* If no semicolon follows, either we have a parse error or
             are at the end of the struct or union and should
             pedwarn.  */
          if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            c_parser_consume_token (parser);
          else
            {
              if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
                pedwarn ("no semicolon at end of struct or union");
              else
                {
                  c_parser_error (parser, "expected %<;%>");
                  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
                  break;
                }
            }
        }
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_struct (type, nreverse (contents),
                                chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  /* A bare tag reference such as "struct foo".  */
  ret = parser_xref_tag (code, ident);
  return ret;
}

/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without*
   the trailing semicolon.

   struct-declaration:
     specifier-qualifier-list struct-declarator-list

   specifier-qualifier-list:
     type-specifier specifier-qualifier-list[opt]
     type-qualifier specifier-qualifier-list[opt]
     attributes specifier-qualifier-list[opt]

   struct-declarator-list:
     struct-declarator
     struct-declarator-list , attributes[opt] struct-declarator

   struct-declarator:
     declarator attributes[opt]
     declarator[opt] : constant-expression attributes[opt]

   GNU extensions:

   struct-declaration:
     __extension__ struct-declaration
     specifier-qualifier-list

   Unlike the ISO C syntax, semicolons are handled elsewhere.  The use
   of attributes where shown is a GNU extension.  In GNU C, we accept
   any expression without commas in the syntax (assignment
   expressions, not just conditional expressions); assignment
   expressions will be diagnosed as non-constant.
*/
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      /* __extension__: recurse with extension diagnostics disabled,
         restoring them afterwards.  */
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  specs = build_null_declspecs ();
  c_parser_declspecs (parser, specs, false, true, true);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Declaration with no declarators: either a forbidden
         member-less declaration or a GNU/MS unnamed struct/union
         member.  */
      tree ret;
      if (!specs->type_seen_p)
        {
          if (pedantic)
            pedwarn ("ISO C forbids member declarations with no members");
          shadow_tag_warned (specs, pedantic);
          ret = NULL_TREE;
        }
      else
        {
          /* Support for unnamed structs or unions as members of
             structs or unions (which is [a] useful and [b] supports
             MS P-SDK).  */
          ret = grokfield (build_id_declarator (NULL_TREE), specs, NULL_TREE);
        }
      return ret;
    }
  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields.  */
      struct c_declarator *declarator;
      bool dummy = false;
      if (c_parser_next_token_is (parser, CPP_COLON))
        /* Un-named bit-field: no declarator before the colon.  */
        declarator = build_id_declarator (NULL_TREE);
      else
        declarator = c_parser_declarator (parser, specs->type_seen_p,
                                          C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
        {
          c_parser_skip_to_end_of_block_or_statement (parser);
          break;
        }
      if (c_parser_next_token_is (parser, CPP_COLON)
          || c_parser_next_token_is (parser, CPP_COMMA)
          || c_parser_next_token_is (parser, CPP_SEMICOLON)
          || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
          || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
        {
          tree postfix_attrs = NULL_TREE;
          tree width = NULL_TREE;
          tree d;
          if (c_parser_next_token_is (parser, CPP_COLON))
            {
              c_parser_consume_token (parser);
              width = c_parser_expr_no_commas (parser, NULL).value;
            }
          if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
            postfix_attrs = c_parser_attributes (parser);
          d = grokfield (declarator, specs, width);
          decl_attributes (&d, chainon (postfix_attrs, all_prefix_attrs), 0);
          /* Chain in reverse; the caller re-reverses via nreverse.  */
          TREE_CHAIN (d) = decls;
          decls = d;
          if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
            all_prefix_attrs = chainon (c_parser_attributes (parser),
                                        prefix_attrs);
          else
            all_prefix_attrs = prefix_attrs;
          if (c_parser_next_token_is (parser, CPP_COMMA))
            c_parser_consume_token (parser);
          else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
                   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
            {
              /* Semicolon consumed in caller.  */
              break;
            }
          else
            {
              c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
              break;
            }
        }
      else
        {
          c_parser_error (parser,
                          "expected %<:%>, %<,%>, %<;%>, %<}%> or "
                          "%<__attribute__%>");
          break;
        }
    }
  return decls;
}

/* Parse a typeof specifier (a GNU extension).
   typeof-specifier:
     typeof ( expression )
     typeof ( type-name )
*/
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec ret;
  ret.kind = ctsk_typeof;
  ret.spec = error_mark_node;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);
  /* The operand of typeof is not evaluated.  */
  skip_evaluation++;
  in_typeof++;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      skip_evaluation--;
      in_typeof--;
      return ret;
    }
  if (c_parser_next_token_starts_typename (parser))
    {
      /* typeof ( type-name ) form.  */
      struct c_type_name *type = c_parser_type_name (parser);
      skip_evaluation--;
      in_typeof--;
      if (type != NULL)
        {
          ret.spec = groktypename (type);
          pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE));
        }
    }
  else
    {
      /* typeof ( expression ) form.  */
      bool was_vm;
      struct c_expr expr = c_parser_expression (parser);
      skip_evaluation--;
      in_typeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
          && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
        error ("%<typeof%> applied to a bit-field");
      ret.spec = TREE_TYPE (expr.value);
      was_vm = variably_modified_type_p (ret.spec, NULL_TREE);
      /* This should be returned with the type so that when the type
         is evaluated, this can be evaluated.  For now, we avoid
         evaluation when the context might.  */
      if (!skip_evaluation && was_vm)
        {
          tree e = expr.value;
          /* DECLs and constants cannot carry a source location
             themselves, so wrap them in a no-op NOP_EXPR that can.  */
          if (DECL_P (e) || CONSTANT_CLASS_P (e))
            e = build1 (NOP_EXPR, void_type_node, e);
          if (EXPR_P (e))
            SET_EXPR_LOCATION (e, input_location);
          add_stmt (e);
        }
      pop_maybe_used (was_vm);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return ret;
}

/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
   6.5.5, C99 6.7.5, 6.7.6).  If TYPE_SEEN_P then a typedef name may
   be redeclared; otherwise it may not.  KIND indicates which kind of
   declarator is wanted.  Returns a valid declarator except in the
   case of a syntax error in which case NULL is returned.
*SEEN_ID is set to true if an identifier being declared is seen; this is used to diagnose bad forms of abstract array declarators and to determine whether an identifier list is syntactically permitted. declarator: pointer[opt] direct-declarator direct-declarator: identifier ( attributes[opt] declarator ) direct-declarator array-declarator direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list[opt] ) pointer: * type-qualifier-list[opt] * type-qualifier-list[opt] pointer type-qualifier-list: type-qualifier attributes type-qualifier-list type-qualifier type-qualifier-list attributes parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator attributes[opt] declaration-specifiers abstract-declarator[opt] attributes[opt] identifier-list: identifier identifier-list , identifier abstract-declarator: pointer pointer[opt] direct-abstract-declarator direct-abstract-declarator: ( attributes[opt] abstract-declarator ) direct-abstract-declarator[opt] array-declarator direct-abstract-declarator[opt] ( parameter-type-list[opt] ) GNU extensions: direct-declarator: direct-declarator ( parameter-forward-declarations parameter-type-list[opt] ) direct-abstract-declarator: direct-abstract-declarator[opt] ( parameter-forward-declarations parameter-type-list[opt] ) parameter-forward-declarations: parameter-list ; parameter-forward-declarations parameter-list ; APPLE LOCAL begin blocks 6339747 block-declarator: pointer pointer[opt] direct-block-declarator direct-block-declarator: ( attributes[opt] block-declarator ) direct-block-declarator[opt] array-declarator direct-block-declarator[opt] ( parameter-type-list[opt] ) [opt] APPLE LOCAL end blocks 6339747 The uses of attributes shown above are GNU extensions. Some forms of array declarator are not included in C99 in the syntax for abstract declarators; these are disallowed elsewhere. 
   This may be a defect (DR#289).

   This function also accepts an omitted abstract declarator as being
   an abstract declarator, although not part of the formal syntax.  */
static struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
                     bool *seen_id)
{
  /* Parse any initial pointer part.  */
  if (c_parser_next_token_is (parser, CPP_MULT))
    {
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      c_parser_declspecs (parser, quals_attrs, false, false, true);
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner == NULL)
        return NULL;
      else
        return make_pointer_declarator (quals_attrs, inner);
    }
  /* APPLE LOCAL begin radar 5732232 - blocks (C++ cc) */
  else if (flag_blocks && c_parser_next_token_is (parser, CPP_XOR))
    {
      /* '^' introduces a block-pointer declarator, parsed like '*'.  */
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      c_parser_declspecs (parser, quals_attrs, false, false, true);
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner == NULL)
        return NULL;
      else
        /* APPLE LOCAL radar 5814025 (C++ cc) */
        return make_block_pointer_declarator (quals_attrs, inner);
    }
  /* APPLE LOCAL end radar 5732232 - blocks (C++ cc) */
  /* Now we have a direct declarator, direct abstract declarator or
     nothing (which counts as a direct abstract declarator here).  */
  return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);
}

/* Parse a direct declarator or direct abstract declarator; arguments
   as c_parser_declarator.  */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p,
                            c_dtr_syn kind, bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following attributes must be read.  If a declaration specifier
     follows, then it is a parameter list; if the specifier is a
     typedef name, there might be an ambiguity about redeclaring it,
     which is resolved in the direction of treating it as a typedef
     name.  If a close parenthesis follows, it is also an empty
     parameter list, as the syntax does not permit empty abstract
     declarators.  Otherwise, it is a parenthesized declarator (in
     which case the analysis may be repeated inside it, recursively).

     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.

     ??? Also following the old parser, attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.

     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names.  */

  /* APPLE LOCAL blocks 6339747 */
  if ((kind != C_DTR_ABSTRACT && kind != C_DTR_BLOCK)
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
           /* APPLE LOCAL begin radar 4281748 */
           && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
               || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
          /* APPLE LOCAL end radar 4281748 */
          || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      /* An ordinary identifier starts the declarator.  */
      struct c_declarator *inner
        = build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      /* Abstract array declarator with the identifier omitted.  */
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  /* Either we are at the end of an abstract declarator, or we have
     parentheses.  */

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      if (kind != C_DTR_NORMAL
          && (c_parser_next_token_starts_declspecs (parser)
              || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
        {
          /* The parentheses introduce a parameter list (see the big
             comment above for the disambiguation rules).  */
          struct c_arg_info *args
            = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
                                         attrs);
          if (args == NULL)
            return NULL;
          else
            {
              inner
                = build_function_declarator (args,
                                             build_id_declarator (NULL_TREE));
              return c_parser_direct_declarator_inner (parser, *seen_id,
                                                       inner);
            }
        }
      /* A parenthesized declarator.  */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
        inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
        {
          c_parser_consume_token (parser);
          if (inner == NULL)
            return NULL;
          else
            return c_parser_direct_declarator_inner (parser, *seen_id, inner);
        }
      else
        {
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          return NULL;
        }
    }
  else
    {
      if (kind == C_DTR_NORMAL)
        {
          c_parser_error (parser, "expected identifier or %<(%>");
          return NULL;
        }
      else
        /* An omitted abstract declarator.  */
        return build_id_declarator (NULL_TREE);
    }
}

/* Parse part of a direct declarator or direct abstract declarator,
   given that some (in INNER) has already been parsed; ID_PRESENT is
   true if an identifier is present, false for an abstract
   declarator.  */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
                                  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      tree dimen;
      c_parser_consume_token (parser);
      c_parser_declspecs (parser, quals_attrs, false, false, true);
      /* "static" may appear before or after the qualifiers
         (C99 6.7.5.3).  */
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
        c_parser_consume_token (parser);
      if (static_seen && !quals_attrs->declspecs_seen_p)
        c_parser_declspecs (parser, quals_attrs, false, false, true);
      if (!quals_attrs->declspecs_seen_p)
        quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
         Otherwise, there may be a dimension, "*", or no
         dimension.
      */
      if (static_seen)
        {
          star_seen = false;
          dimen = c_parser_expr_no_commas (parser, NULL).value;
        }
      else
        {
          if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
            {
              /* Empty brackets: unspecified dimension.  */
              dimen = NULL_TREE;
              star_seen = false;
            }
          else if (c_parser_next_token_is (parser, CPP_MULT))
            {
              if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
                {
                  /* "[*]": VLA of unspecified size.  */
                  dimen = NULL_TREE;
                  star_seen = true;
                  c_parser_consume_token (parser);
                }
              else
                {
                  /* "*" begins an expression, e.g. "[*p]".  */
                  star_seen = false;
                  dimen = c_parser_expr_no_commas (parser, NULL).value;
                }
            }
          else
            {
              star_seen = false;
              dimen = c_parser_expr_no_commas (parser, NULL).value;
            }
        }
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
        c_parser_consume_token (parser);
      else
        {
          c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
                                     "expected %<]%>");
          return NULL;
        }
      declarator = build_array_declarator (dimen, quals_attrs, static_seen,
                                           star_seen);
      if (declarator == NULL)
        return NULL;
      inner = set_array_declarator_inner (declarator, inner, !id_present);
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Function declarator: a parameter list follows.  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs);
      if (args == NULL)
        return NULL;
      else
        {
          inner = build_function_declarator (args, inner);
          return c_parser_direct_declarator_inner (parser, id_present, inner);
        }
    }
  return inner;
}

/* Parse a parameter list or identifier list, including the closing
   parenthesis but not the opening one.  ATTRS are the attributes at
   the start of the list.  ID_LIST_OK is true if an identifier list is
   acceptable; such a list must not have attributes at the start.  */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs)
{
  push_scope ();
  declare_parm_level ();
  /* If the list starts with an identifier, it is an identifier list.
     Otherwise, it is either a prototype list or an empty list.  */
  if (id_list_ok
      && !attrs
      && c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      /* Old-style (K&R) identifier list.  */
      tree list = NULL_TREE, *nextp = &list;
      while (c_parser_next_token_is (parser, CPP_NAME)
             && c_parser_peek_token (parser)->id_kind == C_ID_ID)
        {
          *nextp = build_tree_list (NULL_TREE,
                                    c_parser_peek_token (parser)->value);
          nextp = & TREE_CHAIN (*nextp);
          c_parser_consume_token (parser);
          if (c_parser_next_token_is_not (parser, CPP_COMMA))
            break;
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
            {
              c_parser_error (parser, "expected identifier");
              break;
            }
        }
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
        {
          struct c_arg_info *ret = XOBNEW (&parser_obstack,
                                           struct c_arg_info);
          ret->parms = 0;
          ret->tags = 0;
          ret->types = list;
          ret->others = 0;
          ret->pending_sizes = 0;
          ret->had_vla_unspec = 0;
          c_parser_consume_token (parser);
          pop_scope ();
          return ret;
        }
      else
        {
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          pop_scope ();
          return NULL;
        }
    }
  else
    {
      struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs);
      pop_scope ();
      return ret;
    }
}

/* Parse a parameter list (possibly empty), including the closing
   parenthesis but not the opening one.  ATTRS are the attributes at
   the start of the list.  */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs)
{
  bool good_parm = false;
  /* ??? Following the old parser, forward parameter declarations may
     use abstract declarators, and if no real parameter declarations
     follow the forward declarations then this is not diagnosed.  Also
     note as above that attributes are ignored as the only contents of
     the parentheses, or as the only contents after forward
     declarations.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* "()": an empty, unprototyped list.  */
      struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
      ret->parms = 0;
      ret->tags = 0;
      ret->types = 0;
      ret->others = 0;
      ret->pending_sizes = 0;
      ret->had_vla_unspec = 0;
      c_parser_consume_token (parser);
      return ret;
    }
  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
    {
      /* "(...)": diagnosed, but parsed for recovery.  */
      struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
      ret->parms = 0;
      ret->tags = 0;
      ret->others = 0;
      ret->pending_sizes = 0;
      ret->had_vla_unspec = 0;
      /* Suppress -Wold-style-definition for this case.  */
      ret->types = error_mark_node;
      error ("ISO C requires a named argument before %<...%>");
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
        {
          c_parser_consume_token (parser);
          return ret;
        }
      else
        {
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          return NULL;
        }
    }
  /* Nonempty list of parameters, either terminated with semicolon
     (forward declarations; recurse) or with close parenthesis (normal
     function) or with ", ... )" (variadic function).  */
  while (true)
    {
      /* Parse a parameter.  */
      struct c_parm *parm = c_parser_parameter_declaration (parser, attrs);
      attrs = NULL_TREE;
      if (parm != NULL)
        {
          good_parm = true;
          push_parm_decl (parm);
        }
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
        {
          /* Forward parameter declarations; recurse for the real
             parameter list.  */
          tree new_attrs;
          c_parser_consume_token (parser);
          mark_forward_parm_decls ();
          new_attrs = c_parser_attributes (parser);
          return c_parser_parms_list_declarator (parser, new_attrs);
        }
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
        {
          c_parser_consume_token (parser);
          if (good_parm)
            return get_parm_info (false);
          else
            {
              struct c_arg_info *ret
                = XOBNEW (&parser_obstack, struct c_arg_info);
              ret->parms = 0;
              ret->tags = 0;
              ret->types = 0;
              ret->others = 0;
              ret->pending_sizes = 0;
              ret->had_vla_unspec = 0;
              return ret;
            }
        }
      if (!c_parser_require (parser, CPP_COMMA,
                             "expected %<;%>, %<,%> or %<)%>"))
        {
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
          return NULL;
        }
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
        {
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
            {
              c_parser_consume_token (parser);
              if (good_parm)
                return get_parm_info (true);
              else
                {
                  struct c_arg_info *ret
                    = XOBNEW (&parser_obstack, struct c_arg_info);
                  ret->parms = 0;
                  ret->tags = 0;
                  ret->types = 0;
                  ret->others = 0;
                  ret->pending_sizes = 0;
                  ret->had_vla_unspec = 0;
                  return ret;
                }
            }
          else
            {
              c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                         "expected %<)%>");
              return NULL;
            }
        }
    }
}

/* Parse a parameter declaration.  ATTRS are the attributes at the
   start of the declaration if it is the first parameter.  */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;
  if (!c_parser_next_token_starts_declspecs (parser))
    {
      /* ??? In some Objective-C cases '...' isn't applicable so there
         should be a different message.  */
      c_parser_error (parser,
                      "expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }
  specs = build_null_declspecs ();
  if (attrs)
    {
      declspecs_add_attrs (specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true);
  finish_declspecs (specs);
  pending_xref_error ();
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser, specs->type_seen_p,
                                    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_attributes (parser);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
                       declarator);
}

/* Parse a string literal in an asm expression.  It should not be
   translated, and wide string literals are an error although
   permitted by the syntax.  This is a GNU extension.

   asm-string-literal:
     string-literal

   ??? At present, following the old parser, the caller needs to have
   set c_lex_string_translate to 0.  It would be better to follow the
   C++ parser rather than using the c_lex_string_translate kludge.  */
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  tree str;
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      str = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is (parser, CPP_WSTRING))
    {
      /* Diagnose but recover with an empty string.  */
      error ("wide string literal in %<asm%>");
      str = build_string (1, "");
      c_parser_consume_token (parser);
    }
  else
    {
      c_parser_error (parser, "expected string literal");
      str = NULL_TREE;
    }
  return str;
}

/* Parse a simple asm expression.  This is used in restricted
   contexts, where a full expression with inputs and outputs does not
   make sense.  This is a GNU extension.

   simple-asm-expr:
     asm ( asm-string-literal )
*/
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  tree str;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  /* ??? Follow the C++ parser rather than using the
     c_lex_string_translate kludge.  */
  c_lex_string_translate = 0;
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      c_lex_string_translate = 1;
      return NULL_TREE;
    }
  str = c_parser_asm_string_literal (parser);
  /* Restore normal string translation as soon as the literal has been
     read.  */
  c_lex_string_translate = 1;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  return str;
}

/* Parse (possibly empty) attributes.  This is a GNU extension.

   attributes:
     empty
     attributes attribute

   attribute:
     __attribute__ ( ( attribute-list ) )

   attribute-list:
     attrib
     attribute_list , attrib

   attrib:
     empty
     any-word
     any-word ( identifier )
     any-word ( identifier , nonempty-expr-list )
     any-word ( expr-list )

   where the "identifier" must not be declared as a type, and
   "any-word" may be any identifier (including one declared as a
   type), a reserved word storage class specifier, type specifier or
   type qualifier.  ??? This still leaves out most reserved keywords
   (following the old parser), shouldn't we include them, and why not
   allow identifiers declared as types to start the arguments?  */
static tree
c_parser_attributes (c_parser *parser)
{
  tree attrs = NULL_TREE;
  while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    {
      /* ??? Follow the C++ parser rather than using the
         c_lex_string_translate kludge.  */
      c_lex_string_translate = 0;
      c_parser_consume_token (parser);
      /* __attribute__ requires two opening parentheses.  */
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
        {
          c_lex_string_translate = 1;
          return attrs;
        }
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
        {
          c_lex_string_translate = 1;
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
          return attrs;
        }
      /* Parse the attribute list.
*/ while (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } if (c_parser_next_token_is (parser, CPP_KEYWORD)) { /* ??? See comment above about what keywords are accepted here. */ bool ok; switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_SHORT: case RID_INLINE: case RID_VOLATILE: case RID_SIGNED: case RID_AUTO: case RID_RESTRICT: case RID_COMPLEX: case RID_THREAD: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: ok = true; break; default: ok = false; break; } if (!ok) break; } attr_name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { attr = build_tree_list (attr_name, NULL_TREE); attrs = chainon (attrs, attr); continue; } c_parser_consume_token (parser); /* Parse the attribute contents. If they start with an identifier which is followed by a comma or close parenthesis, then the arguments start with that identifier; otherwise they are an expression list. 
*/ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))) { tree arg1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list (NULL_TREE, arg1); else { c_parser_consume_token (parser); attr_args = tree_cons (NULL_TREE, arg1, c_parser_expr_list (parser, false)); } } else { if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else attr_args = c_parser_expr_list (parser, false); } attr = build_tree_list (attr_name, attr_args); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { c_lex_string_translate = 1; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } attrs = chainon (attrs, attr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { c_lex_string_translate = 1; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { c_lex_string_translate = 1; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } c_lex_string_translate = 1; } return attrs; } /* Parse a type name (C90 6.5.5, C99 6.7.6). 
type-name: specifier-qualifier-list abstract-declarator[opt] */ static struct c_type_name * c_parser_type_name (c_parser *parser) { struct c_declspecs *specs = build_null_declspecs (); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs (parser, specs, false, true, true); if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL; } pending_xref_error (); finish_declspecs (specs); declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW (&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* Parse an initializer (C90 6.5.7, C99 6.7.8). initializer: assignment-expression { initializer-list } { initializer-list , } initializer-list: designation[opt] initializer initializer-list , designation[opt] initializer designation: designator-list = designator-list: designator designator-list designator designator: array-designator . identifier array-designator: [ constant-expression ] GNU extensions: initializer: { } designation: array-designator identifier : array-designator: [ constant-expression ... constant-expression ] Any expression without commas is accepted in the syntax for the constant-expressions, with non-constant expressions rejected later. This function is only used for top-level initializers; for nested ones, see c_parser_initval. */ static struct c_expr c_parser_initializer (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_braced_init (parser, NULL_TREE, false); else { struct c_expr ret; ret = c_parser_expr_no_commas (parser, NULL); if (TREE_CODE (ret.value) != STRING_CST && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR) ret = default_function_array_conversion (ret); return ret; } } /* Parse a braced initializer list. 
TYPE is the type specified for a
   compound literal, and NULL_TREE for other initializers and for
   nested braced lists.  NESTED_P is true for nested braced lists,
   false for the list of a compound literal or the list that is the
   top-level initializer in a declaration.  */

static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  if (nested_p)
    push_init_level (0);
  else
    really_start_incremental_init (type);
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      if (pedantic)
	pedwarn ("ISO C forbids empty initializer braces");
    }
  else
    {
      /* Parse a non-empty initializer list, possibly with a trailing
	 comma.  */
      while (true)
	{
	  c_parser_initelt (parser);
	  if (parser->error)
	    break;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	}
    }
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      struct c_expr ret;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>");
      return ret;
    }
  c_parser_consume_token (parser);
  return pop_init_level (0);
}

/* Parse a nested initializer, including designators.  */

static void
c_parser_initelt (c_parser *parser)
{
  /* Parse any designator or designator list.  A single array
     designator may have the subsequent "=" omitted in GNU C, but a
     longer list or a structure member designator may not.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      /* Old-style structure member designator.  */
      set_init_label (c_parser_peek_token (parser)->value);
      if (pedantic)
	pedwarn ("obsolete use of designated initializer with %<:%>");
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else
    {
      /* des_seen is 0 if there have been no designators, 1 if there
	 has been a single array designator and 2 otherwise.  */
      int des_seen = 0;
      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
	     || c_parser_next_token_is (parser, CPP_DOT))
	{
	  int des_prev = des_seen;
	  if (des_seen < 2)
	    des_seen++;
	  if (c_parser_next_token_is (parser, CPP_DOT))
	    {
	      /* ".member" designator: always a full designator list.  */
	      des_seen = 2;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_NAME))
		{
		  set_init_label (c_parser_peek_token (parser)->value);
		  c_parser_consume_token (parser);
		}
	      else
		{
		  struct c_expr init;
		  init.value = error_mark_node;
		  init.original_code = ERROR_MARK;
		  c_parser_error (parser, "expected identifier");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (init);
		  return;
		}
	    }
	  else
	    {
	      tree first, second;
	      /* ??? Following the old parser, [ objc-receiver
		 objc-message-args ] is accepted as an initializer,
		 being distinguished from a designator by what follows
		 the first assignment expression inside the square
		 brackets, but after a first array designator a
		 subsequent square bracket is for Objective-C taken to
		 start an expression, using the obsolete form of
		 designated initializer without '=', rather than
		 possibly being a second level of designation: in LALR
		 terms, the '[' is shifted rather than reducing
		 designator to designator-list.  */
	      if (des_prev == 1 && c_dialect_objc ())
		{
		  des_seen = des_prev;
		  break;
		}
	      if (des_prev == 0 && c_dialect_objc ())
		{
		  /* This might be an array designator or an
		     Objective-C message expression.  If the former,
		     continue parsing here; if the latter, parse the
		     remainder of the initializer given the starting
		     primary-expression.  ??? It might make sense to
		     distinguish when des_prev == 1 as well; see
		     previous comment.  */
		  tree rec, args;
		  struct c_expr mexpr;
		  c_parser_consume_token (parser);
		  if (c_parser_peek_token (parser)->type == CPP_NAME
		      && ((c_parser_peek_token (parser)->id_kind
			   == C_ID_TYPENAME)
			  || (c_parser_peek_token (parser)->id_kind
			      == C_ID_CLASSNAME)))
		    {
		      /* Type name receiver.  */
		      tree id = c_parser_peek_token (parser)->value;
		      c_parser_consume_token (parser);
		      rec = objc_get_class_reference (id);
		      goto parse_message_args;
		    }
		  first = c_parser_expr_no_commas (parser, NULL).value;
		  if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
		      || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    goto array_desig_after_first;
		  /* Expression receiver.  So far only one part
		     without commas has been parsed; there might be
		     more of the expression.  */
		  rec = first;
		  while (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      struct c_expr next;
		      c_parser_consume_token (parser);
		      next = c_parser_expr_no_commas (parser, NULL);
		      next = default_function_array_conversion (next);
		      rec = build_compound_expr (rec, next.value);
		    }
		parse_message_args:
		  /* Now parse the objc-message-args.  */
		  args = c_parser_objc_message_args (parser);
		  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					     "expected %<]%>");
		  mexpr.value
		    = objc_build_message_expr (build_tree_list (rec, args));
		  mexpr.original_code = ERROR_MARK;
		  /* Now parse and process the remainder of the
		     initializer, starting with this message
		     expression as a primary-expression.  */
		  c_parser_initval (parser, &mexpr);
		  return;
		}
	      c_parser_consume_token (parser);
	      first = c_parser_expr_no_commas (parser, NULL).value;
	    array_desig_after_first:
	      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
		{
		  /* GNU range designator [first ... second].  */
		  c_parser_consume_token (parser);
		  second = c_parser_expr_no_commas (parser, NULL).value;
		}
	      else
		second = NULL_TREE;
	      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		{
		  c_parser_consume_token (parser);
		  set_init_index (first, second);
		  if (pedantic && second)
		    pedwarn ("ISO C forbids specifying range of "
			     "elements to initialize");
		}
	      else
		c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					   "expected %<]%>");
	    }
	}
      if (des_seen >= 1)
	{
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      if (pedantic && !flag_isoc99)
		pedwarn ("ISO C90 forbids specifying subobject to initialize");
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* "=" may only be omitted after a single array
		 designator (GNU extension).  */
	      if (des_seen == 1)
		{
		  if (pedantic)
		    pedwarn ("obsolete use of designated initializer "
			     "without %<=%>");
		}
	      else
		{
		  struct c_expr init;
		  init.value = error_mark_node;
		  init.original_code = ERROR_MARK;
		  c_parser_error (parser, "expected %<=%>");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (init);
		  return;
		}
	    }
	}
    }
  c_parser_initval (parser, NULL);
}

/* Parse a nested initializer; as c_parser_initializer but parses
   initializers within braced lists, after any designators have been
   applied.  If AFTER is not NULL then it is an Objective-C message
   expression which is the primary-expression starting the
   initializer.
*/ static void c_parser_initval (c_parser *parser, struct c_expr *after) { struct c_expr init; gcc_assert (!after || c_dialect_objc ()); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init (parser, NULL_TREE, true); else { init = c_parser_expr_no_commas (parser, after); if (init.value != NULL_TREE && TREE_CODE (init.value) != STRING_CST && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR) init = default_function_array_conversion (init); } process_init_element (init); } /* Parse a compound statement (possibly a function body) (C90 6.6.2, C99 6.8.2). compound-statement: { block-item-list[opt] } { label-declarations block-item-list } block-item-list: block-item block-item-list block-item block-item: nested-declaration statement nested-declaration: declaration GNU extensions: compound-statement: { label-declarations block-item-list } nested-declaration: __extension__ nested-declaration nested-function-definition label-declarations: label-declaration label-declarations label-declaration label-declaration: __label__ identifier-list ; Allowing the mixing of declarations and code is new in C99. The GNU syntax also permits (not shown above) labels at the end of compound statements, which yield an error. We don't allow labels on declarations; this might seem like a natural extension, but there would be a conflict between attributes on the label and prefix attributes on the declaration. ??? The syntax follows the old parser in requiring something after label declarations. Although they are erroneous if the labels declared aren't defined, is it useful for the syntax to be this way? 
OpenMP:

   block-item:
     openmp-directive

   openmp-directive:
     barrier-directive
     flush-directive  */

static tree
c_parser_compound_statement (c_parser *parser)
{
  tree stmt;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    return error_mark_node;
  stmt = c_begin_compound_stmt (true);
  c_parser_compound_statement_nostart (parser);
  return c_end_compound_stmt (stmt, true);
}

/* Parse a compound statement except for the opening brace.  This is
   used for parsing both compound statements and statement
   expressions (which follow different paths to handling the
   opening).  */

static void
c_parser_compound_statement_nostart (c_parser *parser)
{
  bool last_stmt = false;
  bool last_label = false;
  /* APPLE LOCAL radar 5732232 - blocks (not in C++) */
  /* NOTE(review): first_stmt is only ever assigned in the code
     visible here; presumably consulted by other APPLE LOCAL patches
     elsewhere -- verify before removing.  */
  bool first_stmt = true;
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Empty compound statement.  */
      c_parser_consume_token (parser);
      return;
    }
  if (c_parser_next_token_is_keyword (parser, RID_LABEL))
    {
      /* Read zero or more forward-declarations for labels that nested
	 functions can jump to.  */
      while (c_parser_next_token_is_keyword (parser, RID_LABEL))
	{
	  c_parser_consume_token (parser);
	  /* Any identifiers, including those declared as type names,
	     are OK here.  */
	  while (true)
	    {
	      tree label;
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  break;
		}
	      label = declare_label (c_parser_peek_token (parser)->value);
	      C_DECLARED_LABEL_FLAG (label) = 1;
	      add_stmt (build_stmt (DECL_EXPR, label));
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_COMMA))
		c_parser_consume_token (parser);
	      else
		break;
	    }
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      /* ??? Locating this diagnostic on the token after the
	 declarations end follows the old parser, but it might be
	 better to locate it where the declarations start instead.  */
      if (pedantic)
	pedwarn ("ISO C forbids label declarations");
    }
  /* We must now have at least one statement, label or declaration.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      c_parser_error (parser, "expected declaration or statement");
      c_parser_consume_token (parser);
      return;
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
	  || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  || (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	{
	  last_label = true;
	  last_stmt = false;
	  c_parser_label (parser);
	}
      else if (!last_label
	       && c_parser_next_token_starts_declspecs (parser))
	{
	  last_label = false;
	  /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
	  c_parser_declaration_or_fndef (parser, true, true, true, true, NULL);
	  if (last_stmt
	      && ((pedantic && !flag_isoc99)
		  || warn_declaration_after_statement))
	    pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
			 &loc);
	  last_stmt = false;
	}
      else if (!last_label
	       && c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      last_label = false;
	      /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     NULL);
	      /* Following the old parser, __extension__ does not
		 disable this diagnostic.  */
	      restore_extension_diagnostics (ext);
	      if (last_stmt
		  && ((pedantic && !flag_isoc99)
		      || warn_declaration_after_statement))
		pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
			     &loc);
	      last_stmt = false;
	    }
	  else
	    goto statement;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  /* External pragmas, and some omp pragmas, are not associated
	     with regular c code, and so are not to be considered statements
	     syntactically.  This ensures that the user doesn't put them
	     places that would turn into syntax errors if the directive
	     were ignored.  */
	  if (c_parser_pragma (parser, pragma_compound))
	    last_label = false, last_stmt = true;
	}
      else if (c_parser_next_token_is (parser, CPP_EOF))
	{
	  c_parser_error (parser, "expected declaration or statement");
	  return;
	}
      else
	{
	statement:
	  last_label = false;
	  last_stmt = true;
	  c_parser_statement_after_labels (parser);
	}

      parser->error = false;
      /* APPLE LOCAL radar 5732232 - blocks (not in C++) */
      first_stmt = false;
    }
  if (last_label)
    error ("label at end of compound statement");
  c_parser_consume_token (parser);
}

/* Parse a label (C90 6.6.1, C99 6.8.1).

   label:
     identifier : attributes[opt]
     case constant-expression :
     default :

   GNU extensions:

   label:
     case constant-expression ... constant-expression :

   The use of attributes on labels is a GNU extension.  The syntax in
   GNU C accepts any expressions without commas, non-constant
   expressions being rejected later.
*/

static void
c_parser_label (c_parser *parser)
{
  location_t loc1 = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;
  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree exp1, exp2;
      c_parser_consume_token (parser);
      exp1 = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  c_parser_consume_token (parser);
	  label = do_case (exp1, NULL_TREE);
	}
      else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* GNU case range: "case LO ... HI:".  */
	  c_parser_consume_token (parser);
	  exp2 = c_parser_expr_no_commas (parser, NULL).value;
	  if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    label = do_case (exp1, exp2);
	}
      else
	c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	label = do_case (NULL_TREE, NULL_TREE);
    }
  else
    {
      /* Ordinary label: the caller has already verified the NAME and
	 COLON tokens are present.  */
      tree name = c_parser_peek_token (parser)->value;
      tree tlab;
      location_t loc2;
      tree attrs;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      loc2 = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      tlab = define_label (loc2, name);
      if (tlab)
	{
	  decl_attributes (&tlab, attrs, 0);
	  label = add_stmt (build_stmt (LABEL_EXPR, tlab));
	}
    }
  if (label)
    SET_EXPR_LOCATION (label, loc1);
}

/* Parse a statement (C90 6.6, C99 6.8).

   statement:
     labeled-statement
     compound-statement
     expression-statement
     selection-statement
     iteration-statement
     jump-statement

   labeled-statement:
     label statement

   expression-statement:
     expression[opt] ;

   selection-statement:
     if-statement
     switch-statement

   iteration-statement:
     while-statement
     do-statement
     for-statement

   jump-statement:
     goto identifier ;
     continue ;
     break ;
     return expression[opt] ;

   GNU extensions:

   statement:
     asm-statement

   jump-statement:
     goto * expression ;

   Objective-C:

   statement:
     objc-throw-statement
     objc-try-catch-statement
     objc-synchronized-statement

   objc-throw-statement:
     @throw expression ;
     @throw ;

   OpenMP:

   statement:
     openmp-construct

   openmp-construct:
     parallel-construct
     for-construct
     sections-construct
     single-construct
     parallel-for-construct
     parallel-sections-construct
     master-construct
     critical-construct
     atomic-construct
     ordered-construct

   parallel-construct:
     parallel-directive structured-block

   for-construct:
     for-directive iteration-statement

   sections-construct:
     sections-directive section-scope

   single-construct:
     single-directive structured-block

   parallel-for-construct:
     parallel-for-directive iteration-statement

   parallel-sections-construct:
     parallel-sections-directive section-scope

   master-construct:
     master-directive structured-block

   critical-construct:
     critical-directive structured-block

   atomic-construct:
     atomic-directive expression-statement

   ordered-construct:
     ordered-directive structured-block  */

static void
c_parser_statement (c_parser *parser)
{
  /* Consume any leading labels, then parse the statement proper.  */
  while (c_parser_next_token_is_keyword (parser, RID_CASE)
	 || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	 || (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
    c_parser_label (parser);
  c_parser_statement_after_labels (parser);
}

/* Parse a statement, other than a labeled statement.
*/

static void
c_parser_statement_after_labels (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt = NULL_TREE;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser);
	  break;
	case RID_GOTO:
	  /* APPLE LOCAL begin radar 5732232 - blocks (C++ cb) */
	  if (cur_block)
	    error ("goto not allowed in block literal");
	  /* APPLE LOCAL end radar 5732232 - blocks (C++ cb) */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      stmt = c_finish_goto_label (c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU computed goto: "goto *expr;".  */
	      c_parser_consume_token (parser);
	      stmt = c_finish_goto_ptr (c_parser_expression (parser).value);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (&c_cont_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (&c_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      stmt = c_finish_return (NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      stmt = c_finish_return (c_parser_expression_conv (parser).value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "@throw;" -- rethrow, no operand.  */
	      stmt = objc_build_throw_stmt (NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      stmt = objc_build_throw_stmt (c_parser_expression (parser).value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	default:
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      /* Null statement.  */
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  /* APPLE LOCAL begin Radar 6144634 */
  /* Normal expr stmts, including modify exprs, get the location where
     the statement began, i.e. 'loc'.  Assignments of Blocks to Block
     pointer variables get the location of the end of the Block
     definition, i.e. 'input_location', which should already be set by
     this point.  */
  if (stmt && EXPR_P (stmt))
    {
      if (TREE_CODE (stmt) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (stmt, 0)))
	     == BLOCK_POINTER_TYPE)
	SET_EXPR_LOCATION (stmt, input_location);
      else
	SET_EXPR_LOCATION (stmt, loc);
    }
  /* APPLE LOCAL end Radar 6144634 */
}

/* Parse a parenthesized condition from an if, do or while statement.

   condition:
     ( expression )
*/

static tree
c_parser_paren_condition (c_parser *parser)
{
  location_t loc;
  tree cond;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return error_mark_node;
  loc = c_parser_peek_token (parser)->location;
  cond = c_objc_common_truthvalue_conversion
    (c_parser_expression_conv (parser).value);
  if (EXPR_P (cond))
    SET_EXPR_LOCATION (cond, loc);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return cond;
}

/* Parse a statement which is a block in C99.  */

static tree
c_parser_c99_block_statement (c_parser *parser)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  c_parser_statement (parser);
  return c_end_compound_stmt (block, flag_isoc99);
}

/* Parse the body of an if statement or the else half thereof.  This
   is just parsing a statement but (a) it is a block in C99, (b) we
   track whether the body is an if statement for the sake of
   -Wparentheses warnings, (c) we handle an empty body specially for
   the sake of -Wextra warnings.  */

static tree
c_parser_if_body (c_parser *parser, bool *if_p)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  while (c_parser_next_token_is_keyword (parser, RID_CASE)
	 || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	 || (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
    c_parser_label (parser);
  *if_p = c_parser_next_token_is_keyword (parser, RID_IF);
  if (extra_warnings && c_parser_next_token_is (parser, CPP_SEMICOLON))
    add_stmt (build_empty_stmt ());
  c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (block, flag_isoc99);
}

/* Parse an if statement (C90 6.6.4, C99 6.8.4).
if-statement: if ( expression ) statement if ( expression ) statement else statement */ static void c_parser_if_statement (c_parser *parser) { tree block; location_t loc; tree cond; bool first_if = false, second_if = false; tree first_body, second_body; gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); first_body = c_parser_if_body (parser, &first_if); if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { c_parser_consume_token (parser); second_body = c_parser_if_body (parser, &second_if); } else second_body = NULL_TREE; c_finish_if_stmt (loc, cond, first_body, second_body, first_if); add_stmt (c_end_compound_stmt (block, flag_isoc99)); } /* Parse a switch statement (C90 6.6.4, C99 6.8.4). switch-statement: switch (expression) statement */ static void c_parser_switch_statement (c_parser *parser) { tree block, expr, body, save_break; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else expr = error_mark_node; c_start_case (expr); save_break = c_break_label; c_break_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_case (body); if (c_break_label) add_stmt (build1 (LABEL_EXPR, void_type_node, c_break_label)); c_break_label = save_break; add_stmt (c_end_compound_stmt (block, flag_isoc99)); } /* Parse a while statement (C90 6.6.5, C99 6.8.5). while-statement: APPLE LOCAL begin for-fsf-4_4 3274130 5295549 while attributes (expression) statement The use of attributes is a GNU extension. 
APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ static void c_parser_while_statement (c_parser *parser) { /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ tree block, cond, body, save_break, save_cont, attrs; /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE)); c_parser_consume_token (parser); /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ attrs = c_parser_attributes (parser); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, attrs, true); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ add_stmt (c_end_compound_stmt (block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* Parse a do statement (C90 6.6.5, C99 6.8.5). do-statement: APPLE LOCAL begin for-fsf-4_4 3274130 5295549 do attributes statement while ( expression ) ; The use of attributes is a GNU extension. 
APPLE LOCAL end for-fsf-4_4 3274130 5295549 */

static void
c_parser_do_statement (c_parser *parser)
{
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  tree block, cond, body, save_break, save_cont, new_break, new_cont, attrs;
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  /* GNU extension: attributes may follow the "do" keyword.  */
  attrs = c_parser_attributes (parser);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  /* Save and clear the global break/continue labels so that break and
     continue inside the body bind to this loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Restore the outer labels before parsing the condition: a break or
     continue inside the controlling expression must not bind here.  */
  new_break = c_break_label;
  c_break_label = save_break;
  new_cont = c_cont_label;
  c_cont_label = save_cont;
  cond = c_parser_paren_condition (parser);
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  /* Final "false": do-while tests the condition at the bottom.  */
  c_finish_loop (loc, cond, NULL, body, new_break, new_cont, attrs, false);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  add_stmt (c_end_compound_stmt (block, flag_isoc99));
}

/* Parse a for statement (C90 6.6.5, C99 6.8.5).

   for-statement:
APPLE LOCAL begin for-fsf-4_4 3274130 5295549
     for attributes ( expression[opt] ; expression[opt] ; expression[opt] ) \
	 statement
     for attributes ( nested-declaration expression[opt] ; expression[opt] ) \
	 statement

   The form with a declaration is new in C99.

   The use of attributes is a GNU extension.
APPLE LOCAL end for-fsf-4_4 3274130 5295549

   ??? In accordance with the old parser, the declaration may be a
   nested function, which is then rejected in check_for_loop_decls,
   but does it make any sense for this to be included in the grammar?
   Note in particular that the nested function does not include a
   trailing ';', whereas the "declaration" production includes one.
   Also, can we reject bad declarations earlier and cheaper than
   check_for_loop_decls?  */

static void
c_parser_for_statement (c_parser *parser)
{
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  tree block, cond, incr, save_break, save_cont, body, attrs;
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  location_t loc;
  /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
  /* foreach_p becomes true for Objective-C "for (x in collection)"
     fast-enumeration headers, which have no condition/increment.  */
  bool foreach_p = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  /* GNU extension: attributes may follow the "for" keyword.  */
  attrs = c_parser_attributes (parser);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  block = c_begin_compound_stmt (flag_isoc99);
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (NULL_TREE);
	}
      else if (c_parser_next_token_starts_declspecs (parser))
	{
	  /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	  cond = NULL_TREE;
	  c_parser_declaration_or_fndef (parser, true, true, true, true,
					 &cond);
	  /* APPLE LOCAL radar 5925639 */
	  if (c_parser_next_token_is_keyword (parser, RID_IN) && cond)
	    {
	      cond = finish_parse_foreach_header (parser, cond);
	      foreach_p = true;
	    }
	  else
	    check_for_loop_decls ();
	  /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	      cond = NULL_TREE;
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     &cond);
	      restore_extension_diagnostics (ext);
	      /* APPLE LOCAL radar 5925639 */
	      if (c_parser_next_token_is_keyword (parser, RID_IN) && cond)
		{
		  cond = finish_parse_foreach_header (parser, cond);
		  foreach_p = true;
		}
	      else
		check_for_loop_decls ();
	      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
	    }
	  else
	    goto init_expr;
	}
      else
	{
	init_expr:
	  /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	  cond = c_parser_expression (parser).value;
	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      c_parser_consume_token (parser); /* IN */
	      cond = build_tree_list (cond,
				      c_parser_initializer (parser).value);
	      foreach_p = true;
	    }
	  else
	    {
	      c_finish_expr_stmt (cond);
	      c_parser_skip_until_found (parser, CPP_SEMICOLON,
					 "expected %<;%>");
	    }
	}
      objc_foreach_context = 0;
      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
      /* Parse the loop condition.  */
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  c_parser_consume_token (parser);
	  cond = NULL_TREE;
	}
      /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
      /* A foreach header already consumed everything up to ')';
	 keep the cond tree built by finish_parse_foreach_header.  */
      else if (foreach_p)
	;
      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
      else
	{
	  tree ocond = c_parser_expression_conv (parser).value;
	  cond = c_objc_common_truthvalue_conversion (ocond);
	  if (EXPR_P (cond))
	    SET_EXPR_LOCATION (cond, loc);
	  c_parser_skip_until_found (parser, CPP_SEMICOLON,
				     "expected %<;%>");
	}
      /* Parse the increment expression.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	incr = c_process_expr_stmt (NULL_TREE);
      else
	incr = c_process_expr_stmt (c_parser_expression (parser).value);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    {
      cond = error_mark_node;
      incr = error_mark_node;
    }
  /* Save and clear the global break/continue labels so break/continue
     in the body bind to this loop; restored after c_finish_loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, attrs,
		 true);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  add_stmt (c_end_compound_stmt (block, flag_isoc99));
  c_break_label = save_break;
  c_cont_label = save_cont;
}

/* Parse an asm statement, a GNU extension.  This is a full-blown asm
   statement with inputs, outputs, clobbers, and volatile tag
   allowed.

   asm-statement:
     asm type-qualifier[opt] ( asm-argument ) ;

   asm-argument:
     asm-string-literal
     asm-string-literal : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt] \
       : asm-clobbers

   Qualifiers other than volatile are accepted in the syntax but
   warned for.  */

static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, ret;
  bool simple;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
	   || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      warning (0, "%E qualifier ignored on asm",
	       c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;
  /* ??? Follow the C++ parser rather than using the
     c_lex_string_translate kludge.  */
  /* NOTE: c_lex_string_translate is global lexer state; it is set to 0
     here so asm strings are not translated, and must be restored to 1
     on every exit path below.  */
  c_lex_string_translate = 0;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      c_lex_string_translate = 1;
      return NULL_TREE;
    }
  str = c_parser_asm_string_literal (parser);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      simple = true;
      outputs = NULL_TREE;
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  simple = false;
  /* Parse outputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    outputs = NULL_TREE;
  else
    outputs = c_parser_asm_operands (parser, false);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse inputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    inputs = NULL_TREE;
  else
    inputs = c_parser_asm_operands (parser, true);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse clobbers.  */
  clobbers = c_parser_asm_clobbers (parser);
 done_asm:
  c_lex_string_translate = 1;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  ret = build_asm_stmt (quals, build_asm_expr (str, outputs, inputs,
					       clobbers, simple));
  return ret;
}

/* Parse asm operands, a GNU extension.  If CONVERT_P (for inputs but
   not outputs), apply the default conversion of functions and arrays
   to pointers.

   asm-operands:
     asm-operand
     asm-operands , asm-operand

   asm-operand:
     asm-string-literal ( expression )
     [ identifier ] asm-string-literal ( expression )
*/

static tree
c_parser_asm_operands (c_parser *parser, bool convert_p)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      /* Optional symbolic operand name: [ identifier ].  */
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      /* The operand expression itself IS translated; re-enable
	 translation around it and turn it back off afterwards.  */
      c_lex_string_translate = 1;
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  c_lex_string_translate = 0;
	  return NULL_TREE;
	}
      expr = c_parser_expression (parser);
      if (convert_p)
	expr = default_function_array_conversion (expr);
      c_lex_string_translate = 0;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      /* Each operand is a TREE_LIST: purpose = (name, constraint
	 string), value = the expression.  */
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}

/* Parse asm clobbers, a GNU extension.

   asm-clobbers:
     asm-string-literal
     asm-clobbers , asm-string-literal
*/

static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree str = c_parser_asm_string_literal (parser);
      if (str)
	list = tree_cons (NULL_TREE, str, list);
      else
	return NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}

/* Parse an expression other than a compound expression; that is, an
   assignment expression (C90 6.3.16, C99 6.5.16).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.

   assignment-expression:
     conditional-expression
     unary-expression assignment-operator assignment-expression

   assignment-operator: one of
     = *= /= %= += -= <<= >>= &= ^= |=

   In GNU C we accept any conditional expression on the LHS and
   diagnose the invalid lvalue rather than producing a syntax
   error.
*/

static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after)
{
  struct c_expr lhs, rhs, ret;
  enum tree_code code;
  gcc_assert (!after || c_dialect_objc ());
  lhs = c_parser_conditional_expression (parser, after);
  /* Map each compound-assignment token to the tree code of the
     underlying arithmetic; plain '=' is NOP_EXPR.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      code = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      code = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      code = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      code = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      code = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      code = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      code = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      code = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      code = BIT_IOR_EXPR;
      break;
    default:
      /* Not an assignment operator: the conditional expression is the
	 whole assignment-expression.  */
      return lhs;
    }
  c_parser_consume_token (parser);
  /* Assignment is right-associative, hence the recursive call.  */
  rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = default_function_array_conversion (rhs);
  ret.value = build_modify_expr (lhs.value, code, rhs.value);
  if (code == NOP_EXPR)
    ret.original_code = MODIFY_EXPR;
  else
    {
      TREE_NO_WARNING (ret.value) = 1;
      ret.original_code = ERROR_MARK;
    }
  return ret;
}

/* Parse a conditional expression (C90 6.3.15, C99 6.5.15).  If AFTER
   is not NULL then it is an Objective-C message expression which is
   the primary-expression starting the expression as an initializer.

   conditional-expression:
     logical-OR-expression
     logical-OR-expression ? expression : conditional-expression

   GNU extensions:

   conditional-expression:
     logical-OR-expression ?
: conditional-expression
*/

static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after)
{
  struct c_expr cond, exp1, exp2, ret;
  gcc_assert (!after || c_dialect_objc ());
  cond = c_parser_binary_expression (parser, after);
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  cond = default_function_array_conversion (cond);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU "x ?: y" extension: the middle operand is the condition
	 itself, evaluated once.  */
      if (pedantic)
	pedwarn ("ISO C forbids omitting the middle term of a ?: expression");
      /* Make sure first operand is calculated only once.  */
      exp1.value = save_expr (default_conversion (cond.value));
      cond.value = c_objc_common_truthvalue_conversion (exp1.value);
      /* If the condition folded to constant true, the third operand is
	 unevaluated; bump the global skip_evaluation counter while
	 parsing it (undone below).  */
      skip_evaluation += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	  (default_conversion (cond.value));
      /* Constant-false condition: the second operand is unevaluated.  */
      skip_evaluation += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      /* Switch the counter from "skipping exp1" to "skipping exp2" as
	 appropriate for a constant condition.  */
      skip_evaluation += ((cond.value == truthvalue_true_node)
			  - (cond.value == truthvalue_false_node));
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      skip_evaluation -= cond.value == truthvalue_true_node;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      return ret;
    }
  exp2 = c_parser_conditional_expression (parser, NULL);
  exp2 = default_function_array_conversion (exp2);
  skip_evaluation -= cond.value == truthvalue_true_node;
  ret.value = build_conditional_expr (cond.value, exp1.value, exp2.value);
  ret.original_code = ERROR_MARK;
  return ret;
}

/* Parse a binary expression; that is, a logical-OR-expression (C90
   6.3.5-6.3.14, C99 6.5.5-6.5.14).  If AFTER is not NULL then it is
   an Objective-C message expression which is the primary-expression
   starting the expression as an initializer.
   multiplicative-expression:
     cast-expression
     multiplicative-expression * cast-expression
     multiplicative-expression / cast-expression
     multiplicative-expression % cast-expression

   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression

   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression

   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression

   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression

   AND-expression:
     equality-expression
     AND-expression & equality-expression

   exclusive-OR-expression:
     AND-expression
     exclusive-OR-expression ^ AND-expression

   inclusive-OR-expression:
     exclusive-OR-expression
     inclusive-OR-expression | exclusive-OR-expression

   logical-AND-expression:
     inclusive-OR-expression
     logical-AND-expression && inclusive-OR-expression

   logical-OR-expression:
     logical-AND-expression
     logical-OR-expression || logical-AND-expression
*/

static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions.  All the binary
     operators are left-associative.  Thus a binary expression is of
     form:

     E0 op1 E1 op2 E2 ...

     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust skip_evaluation as
     appropriate when the operators are pushed and popped.  */

  /* The precedence levels, where 0 is a dummy lowest level used for
     the bottom of the stack.  */
  enum prec {
    PREC_NONE,
    PREC_LOGOR,
    PREC_LOGAND,
    PREC_BITOR,
    PREC_BITXOR,
    PREC_BITAND,
    PREC_EQ,
    PREC_REL,
    PREC_SHIFT,
    PREC_ADD,
    PREC_MULT,
    NUM_PRECS
  };
  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum prec prec;
    /* The operation on its left.  */
    enum tree_code op;
  } stack[NUM_PRECS];
  int sp;
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_false_node; \
	break; \
      case TRUTH_ORIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_true_node; \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = default_function_array_conversion (stack[sp - 1].expr); \
    stack[sp].expr \
      = default_function_array_conversion (stack[sp].expr); \
    stack[sp - 1].expr = parser_build_binary_op (stack[sp].op, \
						 stack[sp - 1].expr, \
						 stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].expr = c_parser_cast_expression (parser, after);
  /* APPLE LOCAL begin radar 4426814 */
  if (c_dialect_objc() && flag_objc_gc)
    /* APPLE LOCAL radar 5276085 */
    stack[0].expr.value = objc_build_weak_reference_tree (stack[0].expr.value);
  /* APPLE LOCAL end radar 4426814 */
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      c_parser_consume_token (parser);
      while (oprec <= stack[sp].prec)
	POP;
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  /* For &&, a constant-false LHS means the RHS is unevaluated;
	     bump skip_evaluation while it is parsed (undone in POP).  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_false_node;
	  break;
	case TRUTH_ORIF_EXPR:
	  /* Likewise for || with a constant-true LHS.  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_true_node;
	  break;
	default:
	  break;
	}
      sp++;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      /* APPLE LOCAL begin radar 4426814 */
      if (c_dialect_objc() && flag_objc_gc)
	/* APPLE LOCAL radar 5276085 */
	stack[sp].expr.value
	  = objc_build_weak_reference_tree (stack[sp].expr.value);
      /* APPLE LOCAL end radar 4426814 */
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
    }
 out:
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}

/* Parse a cast expression (C90 6.3.4, C99 6.5.4).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.

   cast-expression:
     unary-expression
     ( type-name ) unary-expression
*/

static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser, *after);
  /* If the expression begins with a parenthesized type name, it may
     be either a cast or a compound literal; we need to see whether
     the next character is '{' to tell the difference.  If not, it is
     an unary expression.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      struct c_type_name *type_name;
      struct c_expr ret;
      struct c_expr expr;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}

      /* Save casted types in the function's used types hash table.  */
      used_types_insert (type_name->specs->type);

      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	return c_parser_postfix_expression_after_paren_type (parser,
							     type_name);
      expr = c_parser_cast_expression (parser, NULL);
      expr = default_function_array_conversion (expr);
      ret.value = c_cast_expr (type_name, expr.value);
      ret.original_code = ERROR_MARK;
      return ret;
    }
  else
    return c_parser_unary_expression (parser);
}

/* Parse an unary expression (C90 6.3.3, C99 6.5.3).

   unary-expression:
     postfix-expression
     ++ unary-expression
     -- unary-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-name )

   unary-operator: one of
     & * + - ~ !

   GNU extensions:

   unary-expression:
     __alignof__ unary-expression
     __alignof__ ( type-name )
     && identifier

   unary-operator: one of
     __extension__ __real__ __imag__

   In addition, the GNU syntax treats ++ and -- as unary operators, so
   they may be applied to cast expressions with errors for non-lvalues
   given later.  */

static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext;
  struct c_expr ret, op;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (PREINCREMENT_EXPR, op);
    case CPP_MINUS_MINUS:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (PREDECREMENT_EXPR, op);
    case CPP_AND:
      /* Address-of: note no function/array decay on the operand.  */
      c_parser_consume_token (parser);
      return parser_build_unary_op (ADDR_EXPR,
				    c_parser_cast_expression (parser, NULL));
    case CPP_MULT:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      ret.value = build_indirect_ref (op.value, "unary *");
      ret.original_code = ERROR_MARK;
      return ret;
    case CPP_PLUS:
      c_parser_consume_token (parser);
      if (!c_dialect_objc () && !in_system_header)
	warning (OPT_Wtraditional,
		 "traditional C rejects the unary plus operator");
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (CONVERT_EXPR, op);
    case CPP_MINUS:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (NEGATE_EXPR, op);
    case CPP_COMPL:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (BIT_NOT_EXPR, op);
    case CPP_NOT:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (TRUTH_NOT_EXPR, op);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer (GNU
	 "&&label" extension).  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  ret.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  ret.value = error_mark_node;
	}
      ret.original_code = ERROR_MARK;
      return ret;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__: suppress extension diagnostics around the
	     operand, then restore the previous setting.  */
	  c_parser_consume_token (parser);
	  ext = disable_extension_diagnostics ();
	  ret = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext);
	  return ret;
	case RID_REALPART:
	  c_parser_consume_token (parser);
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (op);
	  return parser_build_unary_op (REALPART_EXPR, op);
	case RID_IMAGPART:
	  c_parser_consume_token (parser);
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (op);
	  return parser_build_unary_op (IMAGPART_EXPR, op);
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}

/* Parse a sizeof expression.  */

static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  c_parser_consume_token (parser);
  /* The operand of sizeof is unevaluated (except VLA sizes); bump the
     global counters while parsing it.  */
  skip_evaluation++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.  */
      struct c_type_name *type_name;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  skip_evaluation--;
	  in_sizeof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      skip_evaluation--;
      in_sizeof--;
      if (type_name->declarator->kind == cdk_array
	  && type_name->declarator->u.array.vla_unspec_p)
	{
	  /* C99 6.7.5.2p4 */
	  error ("%<[*]%> not allowed in other than a declaration");
	}
      return c_expr_sizeof_type (type_name);
    }
  else
    {
      expr = c_parser_unary_expression (parser);
    sizeof_expr:
      skip_evaluation--;
      in_sizeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error ("%<sizeof%> applied to a bit-field");
      return c_expr_sizeof_expr (expr);
    }
}

/* Parse an alignof expression.  */

static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  c_parser_consume_token (parser);
  /* As with sizeof, the operand is unevaluated.  */
  skip_evaluation++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      struct c_type_name *type_name;
      struct c_expr ret;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  skip_evaluation--;
	  in_alignof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof (groktypename (type_name));
      ret.original_code = ERROR_MARK;
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
    alignof_expr:
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof_expr (expr.value);
      ret.original_code = ERROR_MARK;
      return ret;
    }
}

/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2).

   postfix-expression:
     primary-expression
     postfix-expression [ expression ]
     postfix-expression ( argument-expression-list[opt] )
     postfix-expression . identifier
     postfix-expression -> identifier
     postfix-expression ++
     postfix-expression --
     ( type-name ) { initializer-list }
     ( type-name ) { initializer-list , }

   argument-expression-list:
     argument-expression
     argument-expression-list , argument-expression

   primary-expression:
     identifier
     constant
     string-literal
     ( expression )

   GNU extensions:

   primary-expression:
     __func__
       (treated as a keyword in GNU C)
     __FUNCTION__
     __PRETTY_FUNCTION__
     ( compound-statement )
     __builtin_va_arg ( assignment-expression , type-name )
     __builtin_offsetof ( type-name , offsetof-member-designator )
     __builtin_choose_expr ( assignment-expression ,
			     assignment-expression ,
			     assignment-expression )
     __builtin_types_compatible_p ( type-name , type-name )
APPLE LOCAL blocks (C++ cf)
     block-literal-expr

   offsetof-member-designator:
     identifier
     offsetof-member-designator .
identifier offsetof-member-designator [ expression ] Objective-C: primary-expression: [ objc-receiver objc-message-args ] @selector ( objc-selector-arg ) @protocol ( identifier ) @encode ( type-name ) objc-string-literal */ static struct c_expr c_parser_postfix_expression (c_parser *parser) { struct c_expr expr, e1, e2, e3; struct c_type_name *t1, *t2; switch (c_parser_peek_token (parser)->type) { case CPP_NUMBER: case CPP_CHAR: case CPP_WCHAR: expr.value = c_parser_peek_token (parser)->value; expr.original_code = ERROR_MARK; c_parser_consume_token (parser); break; case CPP_STRING: case CPP_WSTRING: expr.value = c_parser_peek_token (parser)->value; expr.original_code = STRING_CST; c_parser_consume_token (parser); break; case CPP_OBJC_STRING: gcc_assert (c_dialect_objc ()); expr.value = objc_build_string_object (c_parser_peek_token (parser)->value); expr.original_code = ERROR_MARK; c_parser_consume_token (parser); break; case CPP_NAME: /* APPLE LOCAL begin radar 5277239 */ if (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME && c_parser_peek_2nd_token (parser)->type == CPP_DOT) { /* CLASS.class_method expression. */ tree receiver, component; receiver = c_parser_objc_receiver (parser); /* consume '.' 
operator */ c_parser_consume_token (parser); component = c_parser_objc_message_args (parser); expr.value = objc_build_property_reference_expr (receiver, component); expr.original_code = ERROR_MARK; break; } /* APPLE LOCAL end radar 5277239 */ if (c_parser_peek_token (parser)->id_kind != C_ID_ID) { c_parser_error (parser, "expected expression"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } { tree id = c_parser_peek_token (parser)->value; location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); expr.value = build_external_ref (id, (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN), loc); /* APPLE LOCAL begin radar 5732232 - blocks (C++ cd) */ /* If a variabled declared as referenced variable, using |...| syntax, is used in the block, it has to be derefrenced because this variable holds address of the outside variable referenced in. */ /* APPLE LOCAL begin radar 5932809 - copyable byref blocks (C++ cd) */ if (TREE_CODE (expr.value) == VAR_DECL) { if (BLOCK_DECL_BYREF (expr.value)) { tree orig_decl = expr.value; expr.value = build_indirect_ref (expr.value, "unary *"); if (COPYABLE_BYREF_LOCAL_VAR (orig_decl)) { /* What we have is an expression which is of type struct __Block_byref_X. Must get to the value of the variable embedded in this structure. It is at: __Block_byref_X.__forwarding->x */ expr.value = build_byref_local_var_access (expr.value, DECL_NAME (orig_decl)); } } else if (COPYABLE_BYREF_LOCAL_VAR (expr.value)) expr.value = build_byref_local_var_access (expr.value, DECL_NAME (expr.value)); } /* APPLE LOCAL end radar 5932809 - copyable byref blocks */ /* APPLE LOCAL end radar 5732232 - blocks (C++ cd) */ expr.original_code = ERROR_MARK; } break; case CPP_OPEN_PAREN: /* A parenthesized expression, statement expression or compound literal. */ if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE) { /* A statement expression. 
*/ tree stmt; c_parser_consume_token (parser); c_parser_consume_token (parser); if (cur_stmt_list == NULL) { error ("braced-group within expression allowed " "only inside a function"); parser->error = true; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } stmt = c_begin_stmt_expr (); c_parser_compound_statement_nostart (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (pedantic) pedwarn ("ISO C forbids braced-groups within expressions"); expr.value = c_finish_stmt_expr (stmt); expr.original_code = ERROR_MARK; } else if (c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* A compound literal. ??? Can we actually get here rather than going directly to c_parser_postfix_expression_after_paren_type from elsewhere? */ struct c_type_name *type_name; c_parser_consume_token (parser); type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; } else expr = c_parser_postfix_expression_after_paren_type (parser, type_name); } else { /* A parenthesized expression. 
*/ c_parser_consume_token (parser); expr = c_parser_expression (parser); if (TREE_CODE (expr.value) == MODIFY_EXPR) TREE_NO_WARNING (expr.value) = 1; expr.original_code = ERROR_MARK; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_FUNCTION_NAME: case RID_PRETTY_FUNCTION_NAME: case RID_C99_FUNCTION_NAME: expr.value = fname_decl (c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); expr.original_code = ERROR_MARK; c_parser_consume_token (parser); break; case RID_VA_ARG: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } e1 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } t1 = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t1 == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; } else { expr.value = build_va_arg (e1.value, groktypename (t1)); expr.original_code = ERROR_MARK; } break; case RID_OFFSETOF: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } { tree type = groktypename (t1); tree offsetof_ref; if (type == error_mark_node) offsetof_ref = error_mark_node; else offsetof_ref = build1 (INDIRECT_REF, type, 
null_pointer_node); /* Parse the second argument to __builtin_offsetof. We must have one identifier, and beyond that we want to accept sub structure and sub array references. */ if (c_parser_next_token_is (parser, CPP_NAME)) { offsetof_ref = build_component_ref (offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_DOT) || c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { if (c_parser_next_token_is (parser, CPP_DOT)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } offsetof_ref = build_component_ref (offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else { tree idx; c_parser_consume_token (parser); idx = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); offsetof_ref = build_array_ref (offsetof_ref, idx); } } } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = fold_offsetof (offsetof_ref, NULL_TREE); expr.original_code = ERROR_MARK; } break; case RID_CHOOSE_EXPR: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } e1 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } e2 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } e3 = c_parser_expr_no_commas (parser, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { 
tree c; c = fold (e1.value); if (TREE_CODE (c) != INTEGER_CST) error ("first argument to %<__builtin_choose_expr%> not" " a constant"); expr = integer_zerop (c) ? e3 : e2; } break; case RID_TYPES_COMPATIBLE_P: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } t2 = c_parser_type_name (parser); if (t2 == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree e1, e2; e1 = TYPE_MAIN_VARIANT (groktypename (t1)); e2 = TYPE_MAIN_VARIANT (groktypename (t2)); expr.value = comptypes (e1, e2) ? 
build_int_cst (NULL_TREE, 1) : build_int_cst (NULL_TREE, 0); expr.original_code = ERROR_MARK; } break; case RID_AT_SELECTOR: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } { tree sel = c_parser_objc_selector_arg (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_selector_expr (sel); expr.original_code = ERROR_MARK; } break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_protocol_expr (id); expr.original_code = ERROR_MARK; } break; case RID_AT_ENCODE: /* Extension to support C-structures in the archiver. 
*/ gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; expr.original_code = ERROR_MARK; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree type = groktypename (t1); expr.value = objc_build_encode_expr (type); expr.original_code = ERROR_MARK; } break; default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } break; /* APPLE LOCAL begin radar 5732232 - blocks (C++ cf) */ case CPP_XOR: if (flag_blocks) { expr.value = c_parser_block_literal_expr (parser); expr.original_code = ERROR_MARK; break; } c_parser_error (parser, "expected expression"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; /* APPLE LOCAL end radar 5732232 - blocks (C++ cf) */ case CPP_OPEN_SQUARE: if (c_dialect_objc ()) { tree receiver, args; c_parser_consume_token (parser); receiver = c_parser_objc_receiver (parser); args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = objc_build_message_expr (build_tree_list (receiver, args)); expr.original_code = ERROR_MARK; break; } /* Else fall through to report error. */ default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; break; } return c_parser_postfix_expression_after_primary (parser, expr); } /* Parse a postfix expression after a parenthesized type name: the brace-enclosed initializer of a compound literal, possibly followed by some postfix operators. 
This is separate because it is not possible
   to tell until after the type name whether a cast expression has a
   cast or a compound literal, or whether the operand of sizeof is a
   parenthesized type name or starts with a compound literal.  */

/* NOTE(review): the caller has already consumed the "( type-name )"
   prefix; TYPE_NAME is the parsed type.  This routine parses only the
   braced initializer and any trailing postfix operators.  */

static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name)
{
  tree type;
  struct c_expr init;
  struct c_expr expr;
  /* Initializer parsing is stateful: start_init/finish_init bracket
     the braced-init parse.  */
  start_init (NULL_TREE, NULL, 0);
  type = groktypename (type_name);
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      /* VLA compound literals are invalid; degrade to error_mark_node
	 but still parse the initializer to keep the parser in sync.  */
      error ("compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false);
  finish_init ();
  maybe_warn_string_init (type, init);

  if (pedantic && !flag_isoc99)
    pedwarn ("ISO C90 forbids compound literals");
  expr.value = build_compound_literal (type, init.value);
  expr.original_code = ERROR_MARK;
  return c_parser_postfix_expression_after_primary (parser, expr);
}

/* Parse a postfix expression after the initial primary or compound
   literal; that is, parse a series of postfix operators.

   Loops consuming postfix operators ([], (), ., ->, ++, --) until the
   next token is none of them, then returns the accumulated expression.
   Note: function/array-to-pointer conversion is applied to the operand
   for member access and increment/decrement, but NOT for array
   subscript or call (build_array_ref/build_function_call handle their
   own conversions).  */

static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   struct c_expr expr)
{
  tree ident, idx, exprlist;
  while (true)
    {
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = build_array_ref (expr.value, idx);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL_TREE;
	  else
	    exprlist = c_parser_expr_list (parser, true);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  expr.value = build_function_call (expr.value, exprlist);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (expr.value, ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DEREF:
	  /* Structure element reference through a pointer (->).  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (build_indirect_ref (expr.value,
								"->"),
					    ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTINCREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTDECREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	default:
	  /* No more postfix operators: done.  */
	  return expr;
	}
    }
}

/* Parse an expression (C90 6.3.17, C99 6.5.17).
expression:
     assignment-expression
     expression , assignment-expression
*/

static struct c_expr
c_parser_expression (c_parser *parser)
{
  struct c_expr expr;
  expr = c_parser_expr_no_commas (parser, NULL);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      c_parser_consume_token (parser);
      next = c_parser_expr_no_commas (parser, NULL);
      /* Only the right-hand operand of the comma gets
	 function/array-to-pointer conversion here; the accumulated
	 left-hand side is used as-is.  */
      next = default_function_array_conversion (next);
      expr.value = build_compound_expr (expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
    }
  return expr;
}

/* Parse an expression and convert functions or arrays to pointers.
   Same as c_parser_expression but applies the default conversion to
   the final result.  */

static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  struct c_expr expr;
  expr = c_parser_expression (parser);
  expr = default_function_array_conversion (expr);
  return expr;
}

/* Parse a non-empty list of expressions.  If CONVERT_P, convert
   functions and arrays to pointers.  Returns a TREE_LIST chain of the
   parsed assignment-expressions, in source order.

   nonempty-expr-list:
     assignment-expression
     nonempty-expr-list , assignment-expression
*/

static tree
c_parser_expr_list (c_parser *parser, bool convert_p)
{
  struct c_expr expr;
  tree ret, cur;
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = default_function_array_conversion (expr);
  ret = cur = build_tree_list (NULL_TREE, expr.value);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = default_function_array_conversion (expr);
      /* Append to the tail; avoids re-walking the chain (nconc).  */
      cur = TREE_CHAIN (cur) = build_tree_list (NULL_TREE, expr.value);
    }
  return ret;
}

/* Parse Objective-C-specific constructs.  */

/* Parse an objc-class-definition.
objc-class-definition: @interface identifier objc-superclass[opt] objc-protocol-refs[opt] objc-class-instance-variables[opt] objc-methodprotolist @end @implementation identifier objc-superclass[opt] objc-class-instance-variables[opt] @interface identifier ( identifier ) objc-protocol-refs[opt] objc-methodprotolist @end @implementation identifier ( identifier ) objc-superclass: : identifier "@interface identifier (" must start "@interface identifier ( identifier ) ...": objc-methodprotolist in the first production may not start with a parenthesized identifier as a declarator of a data definition with no declaration specifiers if the objc-superclass, objc-protocol-refs and objc-class-instance-variables are omitted. */ static void /* APPLE LOCAL radar 4548636 - class attributes. */ c_parser_objc_class_definition (c_parser *parser, tree prefix_attrs) { bool iface_p; tree id1; tree superclass; if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)) iface_p = true; else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION)) /* APPLE LOCAL begin radar 4548636 - class attributes. */ { if (prefix_attrs) { error ("attributes may not be specified on an implementation"); prefix_attrs = NULL_TREE; } iface_p = false; } /* APPLE LOCAL end radar 4548636 - class attributes. 
*/ else gcc_unreachable (); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { /* APPLE LOCAL radar 4965989 */ tree id2 = NULL_TREE; tree proto = NULL_TREE; c_parser_consume_token (parser); /* APPLE LOCAL begin radar 4965989 */ if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN)) { if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } /* APPLE LOCAL end radar 4965989 */ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!iface_p) { /* APPLE LOCAL begin radar 4965989 */ if (id2 == NULL_TREE) { error ("cannot implement anonymous category"); return; } /* APPLE LOCAL end radar 4965989 */ objc_start_category_implementation (id1, id2); return; } if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); /* APPLE LOCAL begin radar 4548636 - class attributes. */ if (prefix_attrs) error ("attributes may not be specified on a category"); /* APPLE LOCAL end radar 4548636 - class attributes. */ objc_start_category_interface (id1, id2, proto); /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 q) */ c_parser_objc_interfacedecllist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); return; } if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } superclass = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else superclass = NULL_TREE; if (iface_p) { tree proto = NULL_TREE; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); /* APPLE LOCAL radar 4548636 - class attributes. 
*/ objc_start_class_interface (id1, superclass, proto, prefix_attrs); } else objc_start_class_implementation (id1, superclass); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) c_parser_objc_class_instance_variables (parser); if (iface_p) { objc_continue_interface (); /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 q) */ c_parser_objc_interfacedecllist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); } else { objc_continue_implementation (); return; } } /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 s) */ static tree c_parser_objc_eq_identifier (c_parser *parser) { tree id; if (c_parser_next_token_is_not (parser, CPP_EQ)) { c_parser_error (parser, "expected %<=%>"); return NULL_TREE; } /* Consume '=' */ c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return NULL_TREE; } id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); return id; } /* Parse obj-property-attribute. */ static void c_parser_objc_property_attribute (c_parser *parser) { tree id; if (c_parser_peek_token (parser)->type != CPP_KEYWORD) { c_parser_error (parser, "expected a property attribute"); c_parser_consume_token (parser); return; } switch (c_parser_peek_token (parser)->keyword) { case RID_READONLY: c_parser_consume_token (parser); objc_set_property_attr (1, NULL_TREE); break; case RID_GETTER: c_parser_consume_token (parser); id = c_parser_objc_eq_identifier (parser); if (id) objc_set_property_attr (2, id); break; case RID_SETTER: c_parser_consume_token (parser); id = c_parser_objc_eq_identifier (parser); if (id) objc_set_property_attr (3, id); /* Consume the ':' which must always follow the setter name. 
*/ if (c_parser_next_token_is (parser, CPP_COLON)) c_parser_consume_token (parser); break; /* APPLE LOCAL begin radar 4947014 - objc atomic property */ case RID_NONATOMIC: c_parser_consume_token (parser); objc_set_property_attr (13, NULL_TREE); break; /* APPLE LOCAL end radar 4947014 - objc atomic property */ default: c_parser_error (parser, "expected a property attribute"); c_parser_consume_token (parser); } } static void c_parser_objc_property_attrlist (c_parser *parser) { while (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN) && c_parser_next_token_is_not (parser, CPP_EOF)) { c_parser_objc_property_attribute (parser); /* APPLE LOCAL begin radar 6302949 */ if (c_parser_next_token_is_not (parser, CPP_COMMA) && c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN) && c_parser_next_token_is_not (parser, CPP_EOF)) warning (0, "property attributes must be separated by a comma"); /* APPLE LOCAL end radar 6302949 */ if (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) /* error */) c_parser_consume_token (parser); } } static void c_parser_objc_property_attr_decl (c_parser *parser) { if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN)) return; c_parser_consume_token (parser); c_parser_objc_property_attrlist (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } static tree c_parser_component_decl (c_parser *parser) { tree decl = c_parser_struct_declaration (parser); return decl; } static void c_parser_objc_property_declaration (c_parser *parser) { tree prop; c_parser_require_keyword (parser, RID_AT_PROPERTY, "expected %<@property%>"); objc_property_attr_context = 1; objc_set_property_attr (0, NULL_TREE); c_parser_objc_property_attr_decl (parser); objc_property_attr_context = 0; prop = c_parser_component_decl (parser); /* Comma-separated properties are chained together in reverse order; add them one by one. 
*/ prop = nreverse (prop); for (; prop; prop = TREE_CHAIN (prop)) objc_add_property_variable (copy_node (prop)); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 s) */ /* Parse objc-class-instance-variables. objc-class-instance-variables: { objc-instance-variable-decl-list[opt] } objc-instance-variable-decl-list: objc-visibility-spec objc-instance-variable-decl ; ; objc-instance-variable-decl-list objc-visibility-spec objc-instance-variable-decl-list objc-instance-variable-decl ; objc-instance-variable-decl-list ; objc-visibility-spec: @private @protected @public objc-instance-variable-decl: struct-declaration */ static void c_parser_objc_class_instance_variables (c_parser *parser) { gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); while (c_parser_next_token_is_not (parser, CPP_EOF)) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { if (pedantic) pedwarn ("extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the instance variables. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Parse any objc-visibility-spec. 
*/ if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE)) { c_parser_consume_token (parser); objc_set_visibility (2); continue; } else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED)) { c_parser_consume_token (parser); objc_set_visibility (0); continue; } else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC)) { c_parser_consume_token (parser); objc_set_visibility (1); continue; } /* APPLE LOCAL begin radar 4564694 */ else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE)) { c_parser_consume_token (parser); objc_set_visibility (3); continue; } /* APPLE LOCAL end radar 4564694 */ else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external); continue; } /* Parse some comma-separated declarations. */ decls = c_parser_struct_declaration (parser); { /* Comma-separated instance variables are chained together in reverse order; add them one by one. */ tree ivar = nreverse (decls); for (; ivar; ivar = TREE_CHAIN (ivar)) objc_add_instance_variable (copy_node (ivar)); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } } /* Parse an objc-class-declaration. objc-class-declaration: @class identifier-list ; */ static void c_parser_objc_class_declaration (c_parser *parser) { tree list = NULL_TREE; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_class (list); } /* Parse an objc-alias-declaration. 
objc-alias-declaration:
     @compatibility_alias identifier identifier ;
*/

static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  tree id1, id2;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      /* Resynchronize at the terminating semicolon.  */
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id2 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (id1, id2);
}

/* Parse an objc-protocol-definition.

   objc-protocol-definition:
     @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
     @protocol identifier-list ;

   "@protocol identifier ;" should be resolved as "@protocol
   identifier-list ;": objc-methodprotolist may not start with a
   semicolon in the first alternative if objc-protocol-refs are
   omitted.  */

static void
/* APPLE LOCAL radar 4947311 - protocol attributes */
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Two-token lookahead distinguishes a forward declaration list
     ("@protocol A, B;") from a full protocol definition.  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      tree list = NULL_TREE;
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  list = chainon (list, build_tree_list (NULL_TREE, id));
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      /* APPLE LOCAL radar 4947311 - protocol attributes */
      objc_declare_protocols (list, attributes);
    }
  else
    {
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Enable protocol-qualifier keywords (global parser state) for
	 the body of the protocol; cleared again after @end.  */
      objc_pq_context = 1;
      /* APPLE LOCAL radar 4947311 - protocol attributes */
      objc_start_protocol (id, proto, attributes);
      /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 r) */
      c_parser_objc_interfacedecllist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_pq_context = 0;
      objc_finish_interface ();
    }
}

/* Parse an objc-method-type.  Consumes the +/- token and encodes the
   result as a tree code: PLUS_EXPR for a class method marker,
   MINUS_EXPR for an instance method marker.

   objc-method-type:
     +
     -
*/

static enum tree_code
c_parser_objc_method_type (c_parser *parser)
{
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS:
      c_parser_consume_token (parser);
      return PLUS_EXPR;
    case CPP_MINUS:
      c_parser_consume_token (parser);
      return MINUS_EXPR;
    default:
      /* Callers only invoke this when the next token is + or -.  */
      gcc_unreachable ();
    }
}

/* Parse an objc-method-definition.
objc-method-definition:
     objc-method-type objc-method-decl ;[opt] compound-statement
*/

static void
c_parser_objc_method_definition (c_parser *parser)
{
  enum tree_code type = c_parser_objc_method_type (parser);
  tree decl;
  objc_set_method_type (type);
  /* Allow protocol-qualifier keywords while parsing the declaration
     (cf. the matching use in c_parser_objc_methodproto).  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* A stray ';' before the body is tolerated, with a pedwarn.  */
      c_parser_consume_token (parser);
      if (pedantic)
	pedwarn ("extra semicolon in method definition specified");
    }
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }
  objc_pq_context = 0;
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 a) */
  /* objc_method_attributes is a global set as a side effect of
     c_parser_objc_method_decl; it is consumed and reset here.  */
  objc_start_method_definition (decl, objc_method_attributes);
  objc_method_attributes = NULL_TREE;
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 a) */
  add_stmt (c_parser_compound_statement (parser));
  objc_finish_method_definition (current_function_decl);
}

/* APPLE LOCAL begin C* language (in 4.2 w) */
/* True iff the given TOKEN starts a methodproto: a '+' or '-' method
   marker, or the @required / @optional keywords.  */

static bool
c_token_starts_methodproto (c_token *token)
{
  return token->type == CPP_PLUS
	 || token->type == CPP_MINUS
	 || (token->type == CPP_KEYWORD
	     && (token->keyword == RID_AT_REQUIRED
		 || token->keyword == RID_AT_OPTIONAL));
}
/* APPLE LOCAL end C* language (in 4.2 w) */

/* Parse an objc-methodprotolist.

   objc-methodprotolist:
     empty
     objc-methodprotolist objc-methodproto
     objc-methodprotolist declaration
     objc-methodprotolist ;

   The declaration is a data definition, which may be missing
   declaration specifiers under the same rules and diagnostics as
   other data definitions outside functions, and the stray semicolon
   is diagnosed the same way as a stray semicolon outside a function.
*/
/* Parse the body of an ObjC @interface/@protocol: a sequence of
   properties, method prototypes, pragmas, stray semicolons and plain
   declarations, terminated by @end (or EOF).  */
static void
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 b) */
c_parser_objc_interfacedecllist (c_parser *parser)
{
  while (true)
    {
      /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 b) */
      c_token *token;
      token = c_parser_peek_token (parser);
      /* @property introduces a property declaration.  */
      if (token->type == CPP_KEYWORD && token->keyword == RID_AT_PROPERTY)
        {
          c_parser_objc_property_declaration (parser);
          continue;
        }
      /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 b) */
      /* APPLE LOCAL begin C* language (in 4.2 w) */
      if (c_token_starts_methodproto (token))
        {
          c_parser_objc_methodproto (parser);
          continue;
        }
      /* APPLE LOCAL end C* language (in 4.2 w) */
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_SEMICOLON:
          /* A lone ';' is tolerated here, with a pedantic warning.  */
          if (pedantic)
            pedwarn ("ISO C does not allow extra %<;%> outside of a function");
          c_parser_consume_token (parser);
          break;
        /* APPLE LOCAL begin C* language (in 4.2 w) */
        /* CPP_PLUS and CPP_MINUS deleted */
        /* APPLE LOCAL end C* language (in 4.2 w) */
        case CPP_PRAGMA:
          c_parser_pragma (parser, pragma_external);
          break;
        case CPP_EOF:
          return;
        default:
          if (c_parser_next_token_is_keyword (parser, RID_AT_END))
            return;
          /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
          c_parser_declaration_or_fndef (parser, false, true, false, true,
                                         NULL);
          break;
        }
    }
}

/* Parse an objc-methodproto.

   objc-methodproto:
     objc-method-type objc-method-decl ;
*/

static void
c_parser_objc_methodproto (c_parser *parser)
{
  /* APPLE LOCAL C* language */
  enum tree_code type;
  tree decl;
  /* APPLE LOCAL begin C* language */
  /* @required / @optional just flip the protocol-method optionality
     state; they are not method prototypes themselves.  */
  if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
    {
      objc_set_method_opt (0);
      c_parser_consume_token (parser);
      return;
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
    {
      objc_set_method_opt (1);
      c_parser_consume_token (parser);
      return;
    }
  /* APPLE LOCAL end C* language */
  /* APPLE LOCAL C* language */
  type = c_parser_objc_method_type (parser);
  objc_set_method_type (type);
  /* Remember protocol qualifiers in prototypes.  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  /* Forget protocol qualifiers here.  */
  objc_pq_context = 0;
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 c) */
  /* objc_method_attributes is a file-scope side channel filled in by
     c_parser_objc_method_decl; consume and reset it here.  */
  objc_add_method_declaration (decl, objc_method_attributes);
  objc_method_attributes = NULL_TREE;
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 c) */
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}

/* Parse an objc-method-decl.

   objc-method-decl:
     ( objc-type-name ) objc-selector
     objc-selector
     ( objc-type-name ) objc-keyword-selector objc-optparmlist
     objc-keyword-selector objc-optparmlist

   objc-keyword-selector:
     objc-keyword-decl
     objc-keyword-selector objc-keyword-decl

   objc-keyword-decl:
     objc-selector : ( objc-type-name ) identifier
     objc-selector : identifier
     : ( objc-type-name ) identifier
     : identifier

   objc-optparmlist:
     objc-optparms objc-optellipsis

   objc-optparms:
     empty
     objc-opt-parms , parameter-declaration

   objc-optellipsis:
     empty
     , ...
*/

static tree
c_parser_objc_method_decl (c_parser *parser)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  /* Optional parenthesized return type.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      type = c_parser_objc_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      while (true)
        {
          /* APPLE LOCAL radar 4157812 */
          tree attr = NULL_TREE;
          tree atype = NULL_TREE, id, keyworddecl;
          if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
            break;
          if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
            {
              c_parser_consume_token (parser);
              atype = c_parser_objc_type_name (parser);
              c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                         "expected %<)%>");
            }
          /* APPLE LOCAL begin radar 4157812 */
          if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
            attr = c_parser_attributes (parser);
          /* APPLE LOCAL end radar 4157812 */
          if (c_parser_next_token_is_not (parser, CPP_NAME))
            {
              c_parser_error (parser, "expected identifier");
              return error_mark_node;
            }
          id = c_parser_peek_token (parser)->value;
          c_parser_consume_token (parser);
          /* APPLE LOCAL radar 4157812 */
          keyworddecl = objc_build_keyword_decl (tsel, atype, id, attr);
          list = chainon (list, keyworddecl);
          tsel = c_parser_objc_selector (parser);
          if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
            break;
        }
      /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
        objc_method_attributes = c_parser_attributes (parser);
      /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
      /* Parse the optional parameter list.  Optional Objective-C
         method parameters follow the C syntax, and may include '...'
         to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
        {
          struct c_parm *parm;
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
            {
              ellipsis = true;
              c_parser_consume_token (parser);
              /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
              if (objc_method_attributes)
                error ("method attributes must be specified at the end only");
              if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
                objc_method_attributes = c_parser_attributes (parser);
              /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
              break;
            }
          parm = c_parser_parameter_declaration (parser, NULL_TREE);
          if (parm == NULL)
            break;
          parms = chainon (parms,
                           build_tree_list (NULL_TREE, grokparm (parm)));
        }
      sel = list;
    }
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
  else
    {
      gcc_assert (objc_method_attributes == NULL_TREE);
      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
        objc_method_attributes = c_parser_attributes (parser);
    }
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
  /* APPLE LOCAL begin radar 4157812 */
  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }
  /* APPLE LOCAL end radar 4157812 */
  return objc_build_method_signature (type, sel, parms, ellipsis);
}

/* Parse an objc-type-name.
   objc-type-name:
     objc-type-qualifiers[opt] type-name
     objc-type-qualifiers[opt]

   objc-type-qualifiers:
     objc-type-qualifier
     objc-type-qualifiers objc-type-qualifier

   objc-type-qualifier: one of
     in out inout bycopy byref oneway
*/

static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *typename = NULL;
  tree type = NULL_TREE;
  /* Collect any leading protocol qualifiers into QUALS.  */
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_KEYWORD
          && (token->keyword == RID_IN || token->keyword == RID_OUT
              || token->keyword == RID_INOUT || token->keyword == RID_BYCOPY
              || token->keyword == RID_BYREF || token->keyword == RID_ONEWAY))
        {
          /* APPLE LOCAL radar 4301047 (in 4.2 z) */
          quals = chainon (build_tree_list (NULL_TREE, token->value), quals);
          c_parser_consume_token (parser);
        }
      else
        break;
    }
  /* The type-name itself is optional; TYPE stays NULL_TREE if absent.  */
  if (c_parser_next_token_starts_typename (parser))
    typename = c_parser_type_name (parser);
  if (typename)
    type = groktypename (typename);
  /* Return a TREE_LIST of (qualifiers, type).  */
  return build_tree_list (quals, type);
}

/* Parse objc-protocol-refs.

   objc-protocol-refs:
     < identifier-list >
*/

static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree list = NULL_TREE;
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
        {
          c_parser_error (parser, "expected identifier");
          break;
        }
      id = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, id));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);
      else
        break;
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return list;
}

/* Parse an objc-try-catch-statement.
   objc-try-catch-statement:
     @try compound-statement objc-catch-list[opt]
     @try compound-statement objc-catch-list[opt] @finally compound-statement

   objc-catch-list:
     @catch ( parameter-declaration ) compound-statement
     objc-catch-list @catch ( parameter-declaration ) compound-statement
*/

static void
c_parser_objc_try_catch_statement (c_parser *parser)
{
  location_t loc;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (loc, stmt);
  /* Zero or more @catch clauses.  */
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      c_parser_consume_token (parser);
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
        break;
      /* APPLE LOCAL begin radar 2848255 */
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
        {
          /* @catch (...) — catch-all clause, no parameter.  */
          c_parser_consume_token (parser);
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          objc_begin_catch_clause (NULL_TREE);
        }
      else
        {
          parm = c_parser_parameter_declaration (parser, NULL_TREE);
          if (parm == NULL)
            {
              c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
              break;
            }
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          objc_begin_catch_clause (grokparm (parm));
        }
      /* APPLE LOCAL end radar 2848255 */
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
        c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  /* Optional @finally clause.  */
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      location_t finloc;
      tree finstmt;
      c_parser_consume_token (parser);
      finloc = c_parser_peek_token (parser)->location;
      finstmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (finloc, finstmt);
    }
  objc_finish_try_stmt ();
}

/* APPLE LOCAL begin radar 5982990 */
/* This routine is called from c_parser_objc_synchronized_statement
   and is identical to c_parser_compound_statement with the addition
   of volatizing local variables seen
   in the scope of @synchroniz block.  */
static tree
c_parser_objc_synch_compound_statement (c_parser *parser)
{
  tree stmt;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    return error_mark_node;
  stmt = c_begin_compound_stmt (true);
  c_parser_compound_statement_nostart (parser);
  /* Under setjmp/longjmp-based ObjC exceptions, locals touched inside
     the block must be marked volatile.  */
  if (flag_objc_sjlj_exceptions)
    objc_mark_locals_volatile (NULL);
  return c_end_compound_stmt (stmt, true);
}
/* APPLE LOCAL end radar 5982990 */

/* Parse an objc-synchronized-statement.

   objc-synchronized-statement:
     @synchronized ( expression ) compound-statement
*/

static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  location_t loc;
  tree expr, stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      expr = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    expr = error_mark_node;
  /* APPLE LOCAL radar 5982990 */
  stmt = c_parser_objc_synch_compound_statement (parser);
  objc_build_synchronized (loc, expr, stmt);
}

/* Parse an objc-selector; return NULL_TREE without an error if the
   next token is not an objc-selector.

   objc-selector:
     identifier
     one of
       enum struct union if else while do for switch case default
       break continue return goto asm sizeof typeof __alignof
       unsigned long const short volatile signed restrict _Complex
       in out inout bycopy byref oneway int char float double void _Bool

   ??? Why this selection of keywords but not, for example, storage
   class specifiers?
*/
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* Any plain identifier is a selector.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* A fixed subset of keywords may also be used as selectors.  */
  switch (token->keyword)
    {
    case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_IF:
    case RID_ELSE: case RID_WHILE: case RID_DO: case RID_FOR:
    case RID_SWITCH: case RID_CASE: case RID_DEFAULT: case RID_BREAK:
    case RID_CONTINUE: case RID_RETURN: case RID_GOTO: case RID_ASM:
    case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF: case RID_UNSIGNED:
    case RID_LONG: case RID_CONST: case RID_SHORT: case RID_VOLATILE:
    case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX: case RID_IN:
    case RID_OUT: case RID_INOUT: case RID_BYCOPY: case RID_BYREF:
    case RID_ONEWAY: case RID_INT: case RID_CHAR: case RID_FLOAT:
    case RID_DOUBLE: case RID_VOID: case RID_BOOL:
      c_parser_consume_token (parser);
      return value;
    default:
      return NULL_TREE;
    }
}

/* Parse an objc-selector-arg.

   objc-selector-arg:
     objc-selector
     objc-keywordname-list

   objc-keywordname-list:
     objc-keywordname
     objc-keywordname-list objc-keywordname

   objc-keywordname:
     objc-selector :
     :
*/

static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A bare selector with no following colon stands alone.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  while (true)
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        return list;
      list = chainon (list, build_tree_list (sel, NULL_TREE));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
        break;
    }
  return list;
}

/* Parse an objc-receiver.
   objc-receiver:
     expression
     class-name
     type-name
*/

static tree
c_parser_objc_receiver (c_parser *parser)
{
  /* A class or type name receiver becomes a class reference; anything
     else is parsed as an ordinary expression.  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
          || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
    {
      tree id = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (id);
    }
  return c_parser_expression (parser).value;
}

/* Parse objc-message-args.

   objc-message-args:
     objc-selector
     objc-keywordarg-list

   objc-keywordarg-list:
     objc-keywordarg
     objc-keywordarg-list objc-keywordarg

   objc-keywordarg:
     objc-selector : objc-keywordexpr
     : objc-keywordexpr
*/

static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A unary message: bare selector with no colon.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  while (true)
    {
      tree keywordexpr;
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        return list;
      keywordexpr = c_parser_objc_keywordexpr (parser);
      list = chainon (list, build_tree_list (sel, keywordexpr));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
        break;
    }
  return list;
}

/* Parse an objc-keywordexpr.

   objc-keywordexpr:
     nonempty-expr-list
*/

static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  tree list = c_parser_expr_list (parser, true);
  if (TREE_CHAIN (list) == NULL_TREE)
    {
      /* Just return the expression, remove a level of
         indirection.  */
      return TREE_VALUE (list);
    }
  else
    {
      /* We have a comma expression, we will collapse later.  */
      return list;
    }
}

/* Handle pragmas.  Some OpenMP pragmas are associated with, and therefore
   should be considered, statements.  ALLOW_STMT is true if we're within
   the context of a function and such pragmas are to be allowed.  Returns
   true if we actually parsed such a pragma.
*/
/* Dispatch on the pragma kind of the current token.  CONTEXT says
   where the pragma appears (external scope, compound statement, or
   statement position); several OpenMP pragmas are only valid in
   compound statements.  Returns true only when an OpenMP construct
   that counts as a statement was parsed.
   NOTE(review): the preceding comment mentions ALLOW_STMT, but the
   parameter is CONTEXT — the comment appears stale.  */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context)
{
  unsigned int id;
  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);
  switch (id)
    {
    case PRAGMA_OMP_BARRIER:
      if (context != pragma_compound)
        {
          if (context == pragma_stmt)
            c_parser_error (parser, "%<#pragma omp barrier%> may only be "
                            "used in compound statements");
          goto bad_stmt;
        }
      c_parser_omp_barrier (parser);
      return false;
    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
        {
          if (context == pragma_stmt)
            c_parser_error (parser, "%<#pragma omp flush%> may only be "
                            "used in compound statements");
          goto bad_stmt;
        }
      c_parser_omp_flush (parser);
      return false;
    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;
    case PRAGMA_OMP_SECTION:
      /* Valid only inside an omp sections construct; handled there.  */
      error ("%<#pragma omp section%> may only be used in "
             "%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    default:
      if (id < PRAGMA_FIRST_EXTERNAL)
        {
          if (context == pragma_external)
            {
            bad_stmt:
              c_parser_error (parser, "expected declaration specifiers");
              c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
              return false;
            }
          c_parser_omp_construct (parser);
          return true;
        }
      break;
    }
  /* Externally-registered pragma: invoke its handler.  */
  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);
  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);
  return false;
}

/* The interface the pragma parsers have to the lexer.
*/
/* Return the next token for a pragma handler, storing its tree value
   in *VALUE.  Keywords are reported as CPP_NAME; CPP_PRAGMA_EOL is
   mapped to CPP_EOF and (like EOF) is not consumed.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  c_token *tok = c_parser_peek_token (the_parser);
  enum cpp_ttype ret = tok->type;
  *value = tok->value;
  if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
    ret = CPP_EOF;
  else
    {
      if (ret == CPP_KEYWORD)
        ret = CPP_NAME;
      c_parser_consume_token (the_parser);
    }
  return ret;
}

/* Parse "#pragma GCC pch_preprocess \"file\"" and hand the named file
   to the PCH machinery.  */
static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree name = NULL;
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      name = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    c_parser_error (parser, "expected string literal");
  c_parser_skip_to_pragma_eol (parser);
  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
}

/* OpenMP 2.5 parsing routines.  */

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned
   and the token is not consumed.  Otherwise appropriate
   pragma_omp_clause is returned and the token is consumed.  */

static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;
  /* "if" and "default" lex as keywords; all other clause names are
     plain identifiers matched by first letter then full strcmp.  */
  if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      switch (p[0])
        {
        case 'c':
          if (!strcmp ("copyin", p))
            result = PRAGMA_OMP_CLAUSE_COPYIN;
          else if (!strcmp ("copyprivate", p))
            result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
          break;
        case 'f':
          if (!strcmp ("firstprivate", p))
            result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
          break;
        case 'l':
          if (!strcmp ("lastprivate", p))
            result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
          break;
        case 'n':
          if (!strcmp ("nowait", p))
            result = PRAGMA_OMP_CLAUSE_NOWAIT;
          else if (!strcmp ("num_threads", p))
            result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
          break;
        case 'o':
          if (!strcmp ("ordered", p))
            result = PRAGMA_OMP_CLAUSE_ORDERED;
          break;
        case 'p':
          if (!strcmp ("private", p))
            result = PRAGMA_OMP_CLAUSE_PRIVATE;
          break;
        case 'r':
          if (!strcmp ("reduction", p))
            result = PRAGMA_OMP_CLAUSE_REDUCTION;
          break;
        case 's':
          if (!strcmp ("schedule", p))
            result = PRAGMA_OMP_CLAUSE_SCHEDULE;
          else if (!strcmp ("shared", p))
            result = PRAGMA_OMP_CLAUSE_SHARED;
          break;
        }
    }
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);
  return result;
}

/* Validate that a clause of the given type does not already exist.  */

static void
check_no_duplicate_clause (tree clauses, enum tree_code code,
                           const char *name)
{
  tree c;
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
        error ("too many %qs clauses", name);
        break;
      }
}

/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   If KIND is nonzero, create the appropriate node and install the
   decl in OMP_CLAUSE_DECL and add the node to the head of the list.
   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.  */

static tree
c_parser_omp_variable_list (c_parser *parser, enum omp_clause_code kind,
                            tree list)
{
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");
  while (c_parser_next_token_is (parser, CPP_NAME)
         && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      if (t == NULL_TREE)
        undeclared_variable (c_parser_peek_token (parser)->value,
                             c_parser_peek_token (parser)->location);
      else if (t == error_mark_node)
        ;  /* Already diagnosed; skip silently.  */
      else if (kind != 0)
        {
          tree u = build_omp_clause (kind);
          OMP_CLAUSE_DECL (u) = t;
          OMP_CLAUSE_CHAIN (u) = list;
          list = u;
        }
      else
        list = tree_cons (t, NULL_TREE, list);
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
        break;
      c_parser_consume_token (parser);
    }
  return list;
}

/* Similarly, but expect leading and trailing parenthesis.  This is
   a very common case for omp clauses.
*/ static tree c_parser_omp_var_list_parens (c_parser *parser, enum tree_code kind, tree list) { if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { list = c_parser_omp_variable_list (parser, kind, list); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* OpenMP 2.5: copyin ( variable-list ) */ static tree c_parser_omp_clause_copyin (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list); } /* OpenMP 2.5: copyprivate ( variable-list ) */ static tree c_parser_omp_clause_copyprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list); } /* OpenMP 2.5: default ( shared | none ) */ static tree c_parser_omp_clause_default (c_parser *parser, tree list) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; tree c; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } c_parser_consume_token (parser); } else { invalid_kind: c_parser_error (parser, "expected %<none%> or %<shared%>"); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default"); c = build_omp_clause (OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 2.5: firstprivate ( variable-list ) */ static tree c_parser_omp_clause_firstprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list); } /* OpenMP 2.5: if ( 
   expression ) */

static tree
c_parser_omp_clause_if (c_parser *parser, tree list)
{
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree t = c_parser_paren_condition (parser);
      tree c;
      check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");
      c = build_omp_clause (OMP_CLAUSE_IF);
      OMP_CLAUSE_IF_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  else
    c_parser_error (parser, "expected %<(%>");
  return list;
}

/* OpenMP 2.5:
   lastprivate ( variable-list ) */

static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list);
}

/* OpenMP 2.5:
   nowait */

static tree
c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree c;
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");
  c = build_omp_clause (OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   num_threads ( expression ) */

static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      tree c, t = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }
      /* Attempt to statically determine when the number isn't
         positive.  */
      c = fold_build2 (LE_EXPR, boolean_type_node, t,
                       build_int_cst (TREE_TYPE (t), 0));
      if (c == boolean_true_node)
        {
          warning (0, "%<num_threads%> value must be positive");
          t = integer_one_node;
        }
      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");
      c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}

/* OpenMP 2.5:
   ordered */

static tree
c_parser_omp_clause_ordered (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree c;
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");
  c = build_omp_clause (OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   private ( variable-list ) */

static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
}

/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && || */

static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      enum tree_code code;
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_PLUS:
          code = PLUS_EXPR;
          break;
        case CPP_MULT:
          code = MULT_EXPR;
          break;
        case CPP_MINUS:
          code = MINUS_EXPR;
          break;
        case CPP_AND:
          code = BIT_AND_EXPR;
          break;
        case CPP_XOR:
          code = BIT_XOR_EXPR;
          break;
        case CPP_OR:
          code = BIT_IOR_EXPR;
          break;
        case CPP_AND_AND:
          code = TRUTH_ANDIF_EXPR;
          break;
        case CPP_OR_OR:
          code = TRUTH_ORIF_EXPR;
          break;
        default:
          c_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, "
                          "%<^%>, %<|%>, %<&&%>, or %<||%>");
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
          return list;
        }
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        {
          tree nl, c;
          nl = c_parser_omp_variable_list (parser, OMP_CLAUSE_REDUCTION, list);
          /* Stamp the reduction operator onto each newly-added clause
             (the nodes between NL and the old LIST head).  */
          for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
            OMP_CLAUSE_REDUCTION_CODE (c) = code;
          list = nl;
        }
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}

/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime
*/

static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  c = build_omp_clause (OMP_CLAUSE_SCHEDULE);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      switch (p[0])
        {
        case 'd':
          if (strcmp ("dynamic", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
          break;
        case 'g':
          if (strcmp ("guided", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
          break;
        case 'r':
          if (strcmp ("runtime", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
          break;
        default:
          goto invalid_kind;
        }
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    /* "static" lexes as a keyword, not a CPP_NAME.  */
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else
    goto invalid_kind;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      /* Optional chunk-size expression.  */
      c_parser_consume_token (parser);
      t = c_parser_expr_no_commas (parser, NULL).value;
      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
        error ("schedule %<runtime%> does not take "
               "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
        OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
      else
        c_parser_error (parser, "expected integer expression");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                               "expected %<,%> or %<)%>");
  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}

/* OpenMP 2.5:
   shared ( variable-list ) */

static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
}

/* Parse all OpenMP clauses.  The set clauses allowed by the directive
   is a bitmask in MASK (indexed by pragma_omp_clause); WHERE names the
   directive for diagnostics.  Return the finished list of clauses.  */

static tree
c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
                          const char *where)
{
  tree clauses = NULL;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      const pragma_omp_clause c_kind = c_parser_omp_clause_name (parser);
      const char *c_name;
      tree prev = clauses;
      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = c_parser_omp_clause_copyin (parser, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = c_parser_omp_clause_copyprivate (parser, clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = c_parser_omp_clause_default (parser, clauses);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = c_parser_omp_clause_lastprivate (parser, clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = c_parser_omp_clause_nowait (parser, clauses);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = c_parser_omp_clause_num_threads (parser, clauses);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = c_parser_omp_clause_ordered (parser, clauses);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = c_parser_omp_clause_schedule (parser, clauses);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = c_parser_omp_clause_shared (parser, clauses);
          c_name = "shared";
          break;
        default:
          c_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }
      if (((mask >> c_kind) & 1) == 0 && !parser->error)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error ("%qs is not valid for %qs", c_name, where);
        }
    }
 saw_error:
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_clauses (clauses);
}

/* OpenMP 2.5:

   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   c_parser_statement calls add_stmt.  */

static tree
c_parser_omp_structured_block (c_parser *parser)
{
  tree stmt = push_stmt_list ();
  c_parser_statement (parser);
  return pop_stmt_list (stmt);
}

/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt

   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x

   binop:
     +, *, -, /, &, ^, |, <<, >>

  where x is an lvalue expression with scalar type.
*/ static void c_parser_omp_atomic (c_parser *parser) { tree lhs, rhs; tree stmt; enum tree_code code; c_parser_skip_to_pragma_eol (parser); lhs = c_parser_unary_expression (parser).value; switch (TREE_CODE (lhs)) { case ERROR_MARK: saw_error: c_parser_skip_to_end_of_block_or_statement (parser); return; case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = PLUS_EXPR; rhs = integer_one_node; break; case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = MINUS_EXPR; rhs = integer_one_node; break; default: switch (c_parser_peek_token (parser)->type) { case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; default: c_parser_error (parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } c_parser_consume_token (parser); rhs = c_parser_expression (parser).value; break; } stmt = c_finish_omp_atomic (code, lhs, rhs); if (stmt != error_mark_node) add_stmt (stmt); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* OpenMP 2.5: # pragma omp barrier new-line */ static void c_parser_omp_barrier (c_parser *parser) { c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_barrier (); } /* OpenMP 2.5: # pragma omp critical [(name)] new-line structured-block */ static tree c_parser_omp_critical (c_parser *parser) { tree stmt, name = NULL; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } 
      else
        c_parser_error (parser, "expected identifier");
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  stmt = c_parser_omp_structured_block (parser);
  return c_finish_omp_critical (stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list )

   Parses an optional parenthesized variable list after the pragma, then
   emits the flush.  The parsed list itself is discarded (passed to
   c_parser_omp_var_list_parens only for syntax checking); the OpenMP 2.5
   flush semantics do not depend on it here.  */

static void
c_parser_omp_flush (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, 0, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush ();
}

/* Parse the restricted form of the for statement allowed by OpenMP.
   The real trick here is to determine the loop control variable early
   so that we can push a new decl if necessary to make it private.
   Returns the OMP_FOR tree, or NULL on error.  */

static tree
c_parser_omp_for_loop (c_parser *parser)
{
  tree decl, cond, incr, save_break, save_cont, body, init;
  location_t loc;

  if (!c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return NULL;

  /* Parse the initialization declaration or expression.  Either a
     declaration ("int i = 0;") or a plain assignment ("i = 0;") is
     accepted; anything else is an error.  */
  if (c_parser_next_token_starts_declspecs (parser))
    {
      /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
      c_parser_declaration_or_fndef (parser, true, true, true, true, NULL);
      decl = check_for_loop_decls ();
      if (decl == NULL)
        goto error_init;
      init = decl;
    }
  else if (c_parser_next_token_is (parser, CPP_NAME)
           && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
    {
      decl = c_parser_postfix_expression (parser).value;

      c_parser_require (parser, CPP_EQ, "expected %<=%>");

      init = c_parser_expr_no_commas (parser, NULL).value;
      init = build_modify_expr (decl, NOP_EXPR, init);
      init = c_process_expr_stmt (init);

      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    goto error_init;

  /* Parse the loop condition.  An empty condition is permitted and
     leaves COND as NULL_TREE.  */
  cond = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
    {
      cond = c_parser_expression_conv (parser).value;
      cond = c_objc_common_truthvalue_conversion (cond);
      if (EXPR_P (cond))
        SET_EXPR_LOCATION (cond, input_location);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");

  /* Parse the increment expression (also optional).  */
  incr = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
    incr = c_process_expr_stmt (c_parser_expression (parser).value);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

 parse_body:
  /* Parse the loop body with break disallowed (c_break_label set to a
     non-label sentinel) and continue redirected to a fresh label that is
     appended after the body, per the OpenMP canonical-loop rules.  */
  save_break = c_break_label;
  c_break_label = size_one_node;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = push_stmt_list ();

  add_stmt (c_parser_c99_block_statement (parser));
  if (c_cont_label)
    add_stmt (build1 (LABEL_EXPR, void_type_node, c_cont_label));

  body = pop_stmt_list (body);
  c_break_label = save_break;
  c_cont_label = save_cont;

  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (decl != NULL && decl != error_mark_node && init != error_mark_node)
    return c_finish_omp_for (loc, decl, init, cond, incr, body, NULL);
  return NULL;

 error_init:
  c_parser_error (parser, "expected iteration declaration or initialization");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  decl = init = cond = incr = NULL_TREE;
  /* Still parse the body so the parser stays in sync, but the NULL decl
     above suppresses c_finish_omp_for.  */
  goto parse_body;
}

/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop  */

#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp for": clauses, then the restricted for-loop,
   wrapped in a compound statement scope.  */

static tree
c_parser_omp_for (c_parser *parser)
{
  tree block, clauses, ret;

  clauses = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				      "#pragma omp for");

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (parser);
  if (ret)
    OMP_FOR_CLAUSES (ret) = clauses;
  block = c_end_compound_stmt (block, true);
  add_stmt (block);

  return ret;
}

/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block  */

static tree
c_parser_omp_master (c_parser *parser)
{
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_master (c_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block  */

static tree
c_parser_omp_ordered (c_parser *parser)
{
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_ordered (c_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:

   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block

   Parses the brace-enclosed body of a sections construct, wrapping each
   section in an OMP_SECTION node and returning the whole OMP_SECTIONS
   statement (or NULL_TREE if the opening brace is missing).  */

static tree
c_parser_omp_sections_scope (c_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  location_t loc;

  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }

  stmt = push_stmt_list ();

  loc = c_parser_peek_token (parser)->location;
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      /* The first section may omit its "#pragma omp section" directive;
         collect statements until the first explicit section, the closing
         brace, or EOF.  */
      substmt = push_stmt_list ();

      while (1)
	{
          c_parser_statement (parser);
	  if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	    break;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	  if (c_parser_next_token_is (parser, CPP_EOF))
	    break;
	}

      substmt = pop_stmt_list (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }

  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	break;
      if (c_parser_next_token_is (parser, CPP_EOF))
	break;

      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  /* Complain only once per run of misplaced statements.  */
	  error ("expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = c_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<#pragma omp section%> or %<}%>");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  return add_stmt (stmt);
}

/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope  */

#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp sections": clauses, then the sections scope.  */

static tree
c_parser_omp_sections (c_parser *parser)
{
  tree block, clauses, ret;

  clauses = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				      "#pragma omp sections");

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;
  block = c_end_compound_stmt (block, true);
  add_stmt (block);

  return ret;
}

/* OpenMP 2.5:
   # pragma parallel parallel-clause new-line
   # pragma parallel for parallel-for-clause new-line
   # pragma parallel sections parallel-sections-clause new-line  */

#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

/* Parse "#pragma omp parallel", including the combined
   "parallel for" and "parallel sections" forms.  For the combined forms
   the clause mask is widened with the workshare construct's clauses
   (minus "nowait", which is not permitted on combined constructs), and
   c_split_parallel_clauses divides the parsed clauses between the
   parallel region and the inner workshare statement.  */

static tree
c_parser_omp_parallel (c_parser *parser)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;

  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_consume_token (parser);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "sections") == 0)
	{
	  c_parser_consume_token (parser);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }

  clauses = c_parser_omp_all_clauses (parser, mask, p_name);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      block = c_begin_omp_parallel ();
      c_parser_statement (parser);
      stmt = c_finish_omp_parallel (clauses, block);
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_for_loop (parser);
      if (stmt)
	OMP_FOR_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;

    default:
      gcc_unreachable ();
    }

  return stmt;
}

/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block  */

#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp single": clauses then the structured block,
   combined into an OMP_SINGLE node.  */

static tree
c_parser_omp_single (c_parser *parser)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SINGLE_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				"#pragma omp single");
  OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}

/* Main entry point to parsing most OpenMP pragmas.  Dispatches on the
   pragma kind of the current token and attaches the pragma's source
   location to the resulting statement.  */

static void
c_parser_omp_construct (c_parser *parser)
{
  enum pragma_kind p_kind;
  location_t loc;
  tree stmt;

  loc = c_parser_peek_token (parser)->location;
  p_kind = c_parser_peek_token (parser)->pragma_kind;
  c_parser_consume_pragma (parser);

  /* For all constructs below except #pragma omp atomic
     MUST_NOT_THROW catch handlers are needed when exceptions
     are enabled.  */
  if (p_kind != PRAGMA_OMP_ATOMIC)
    c_maybe_initialize_eh ();

  switch (p_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      c_parser_omp_atomic (parser);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = c_parser_omp_critical (parser);
      break;
    case PRAGMA_OMP_FOR:
      stmt = c_parser_omp_for (parser);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = c_parser_omp_master (parser);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = c_parser_omp_ordered (parser);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = c_parser_omp_parallel (parser);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = c_parser_omp_sections (parser);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = c_parser_omp_single (parser);
      break;
    default:
      gcc_unreachable ();
    }

  if (stmt)
    SET_EXPR_LOCATION (stmt, loc);
}

/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)

   Marks each listed variable for thread-local storage, diagnosing
   variables that cannot legally be threadprivate.  */

static void
c_parser_omp_threadprivate (c_parser *parser)
{
  tree vars, t;

  c_parser_consume_pragma (parser);
  vars = c_parser_omp_var_list_parens (parser, 0, NULL);

  if (!targetm.have_tls)
    sorry ("threadprivate variables not supported in this target");

  /* Mark every variable in VARS to be assigned thread local storage.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
	error ("%qE declared %<threadprivate%> after first use", v);
      else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
	error ("automatic variable %qE cannot be %<threadprivate%>", v);
      else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
	error ("%<threadprivate%> %qE has incomplete type", v);
      else
	{
	  if (! DECL_THREAD_LOCAL_P (v))
	    {
	      DECL_TLS_MODEL (v) = decl_default_tls_model (v);
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  C_DECL_THREADPRIVATE_P (v) = 1;
	}
    }

  c_parser_skip_to_pragma_eol (parser);
}

/* Parse a single source file.
*/

void
c_parse_file (void)
{
  /* Begin with a parser in local (stack) storage: the very first token
     may be a pragma, and if it is #pragma GCC pch_preprocess, handling
     it loads a PCH file — which can trigger garbage collection.  Only
     after that do we move the parser state into GC-managed memory.  */
  c_parser first_parser;

  memset (&first_parser, 0, sizeof first_parser);
  the_parser = &first_parser;

  if (c_parser_peek_token (&first_parser)->pragma_kind
      == PRAGMA_GCC_PCH_PREPROCESS)
    c_parser_pragma_pch_preprocess (&first_parser);

  /* Promote the parser state to GC storage and parse the whole
     translation unit.  */
  the_parser = GGC_NEW (c_parser);
  *the_parser = first_parser;

  c_parser_translation_unit (the_parser);
  the_parser = NULL;
}

/* APPLE LOCAL begin radar 5732232 - blocks (C++ ce) */
/* APPLE LOCAL begin radar 6300081 */

/* This function builds a "generic" block struct type, to be passed
   into the debug information for blocks pointers, to allow gdb to
   find the actual function pointer for the block.  Any time the
   Blocks structure layout changes, this may also need to change.

   Currently a block pointer is a pointer to a __block_literal_n
   struct, the third field of which is a pointer to a
   __block_descriptor struct, whose third field is the function
   pointer.  There are other fields as well, but these are the ones
   gdb needs to know about to find the function pointer.  Therefore a
   generic block struct currently looks like this:

   struct __block_literal_generic
   {
      void * __isa;
      int __flags;
      int __reserved;
      void (*__FuncPtr)(void *);
      struct __block_descriptor
	{
	  unsigned long int reserved;
	  unsigned long int Size;
	} *__descriptor;
   };

   IF AT ANY TIME THE STRUCTURE OF A __BLOCK_LITERAL_N CHANGES, THIS
   MUST BE CHANGED ALSO!!
*/
/* Build the generic __block_literal_generic RECORD_TYPE used for debug
   info (see comment above).  Built at top level so the type is not tied
   to any function's scope.  */
tree
/* APPLE LOCAL radar 6353006 */
c_build_generic_block_struct_type (void)
{
  tree field_decl_chain;
  tree field_decl;
  tree block_struct_type;

  push_to_top_level ();
  block_struct_type = start_struct (RECORD_TYPE,
				    get_identifier ("__block_literal_generic"));

  field_decl = build_decl (FIELD_DECL, get_identifier ("__isa"),
			   ptr_type_node);
  field_decl_chain = field_decl;

  field_decl = build_decl (FIELD_DECL, get_identifier ("__flags"),
			   integer_type_node);
  chainon (field_decl_chain, field_decl);

  field_decl = build_decl (FIELD_DECL, get_identifier ("__reserved"),
			   integer_type_node);
  chainon (field_decl_chain, field_decl);

  /* void *__FuncPtr; */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__FuncPtr"),
			   ptr_type_node);
  chainon (field_decl_chain, field_decl);

  field_decl = build_decl (FIELD_DECL, get_identifier ("__descriptor"),
			   build_block_descriptor_type (false));
  chainon (field_decl_chain, field_decl);
  TYPE_BLOCK_IMPL_STRUCT (block_struct_type) = 1;
  finish_struct (block_struct_type, field_decl_chain, NULL_TREE);
  pop_from_top_level ();
  return block_struct_type;
}
/* APPLE LOCAL end radar 6300081 */

/* APPLE LOCAL begin radar 5847213 - radar 6329245 */
/** build_block_struct_type -
 struct __block_literal_n {
  void *__isa;		// initialized to &_NSConcreteStackBlock or &_NSConcreteGlobalBlock
  int __flags;
  int __reserved;
  void *__FuncPtr;
  struct __block_descriptor {
    unsigned long int reserved;	// NULL
    unsigned long int Size;	// sizeof(struct __block_literal_n)

    // optional helper functions
    void *CopyFuncPtr;		// When BLOCK_HAS_COPY_DISPOSE
    void *DestroyFuncPtr;	// When BLOCK_HAS_COPY_DISPOSE
 } *__descriptor;

 // imported variables
 int x;			// ref variable list ...
 int *y;		// byref variable list
 };

 Builds the per-literal struct type above.  As a side effect it may set
 BLOCK_IMPL->BlockHasCopyDispose (and propagate it to the enclosing
 block) when any captured variable needs copy/dispose helpers.  */
static tree
build_block_struct_type (struct block_sema_info * block_impl)
{
  tree field_decl_chain, field_decl, chain;
  char buffer[32];
  static int unique_count;
  tree block_struct_type;

  /* Check and see if this block is required to have a Copy/Dispose
     helper function.  If yes, set BlockHasCopyDispose to TRUE.  */
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (block_requires_copying (TREE_VALUE (chain)))
      {
	block_impl->BlockHasCopyDispose = TRUE;
	break;
      }

  /* Further check to see that we have __block variables which require
     Copy/Dispose helpers.  */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
	block_impl->BlockHasCopyDispose = TRUE;
	break;
      }

  /* Each literal gets a unique struct tag: __block_literal_1, _2, ...  */
  sprintf(buffer, "__block_literal_%d", ++unique_count);
  push_to_top_level ();
  block_struct_type = start_struct (RECORD_TYPE, get_identifier (buffer));

  /* void *__isa; */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__isa"),
			   ptr_type_node);
  field_decl_chain = field_decl;

  /* int __flags. */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__flags"),
			   integer_type_node);
  chainon (field_decl_chain, field_decl);

  /* int __reserved. */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__reserved"),
			   integer_type_node);
  chainon (field_decl_chain, field_decl);

  /* void *__FuncPtr; */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__FuncPtr"),
			   ptr_type_node);
  chainon (field_decl_chain, field_decl);

  /* struct __block_descriptor *__descriptor. */
  field_decl = build_decl (FIELD_DECL, get_identifier ("__descriptor"),
			   build_block_descriptor_type (block_impl->BlockHasCopyDispose));
  chainon (field_decl_chain, field_decl);

  if (block_impl->BlockHasCopyDispose)
    {
      /* If inner block of a nested block has BlockHasCopyDispose, so
	 does its outer block.  */
      if (block_impl->prev_block_info)
	block_impl->prev_block_info->BlockHasCopyDispose = TRUE;
    }

  /* int x; // ref variable list ... */
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree p = TREE_VALUE (chain);
      /* Note!  const-ness of copied in variable must not be carried over
	 to the type of the synthesized struct field.  It prevents
	 assignment to this field when the copy constructor is
	 synthesized.  */
      field_decl = build_decl (FIELD_DECL, DECL_NAME (p),
			       c_build_qualified_type (TREE_TYPE (p),
						       TYPE_UNQUALIFIED));
      chainon (field_decl_chain, field_decl);
    }

  /* int *y; // byref variable list */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree p = TREE_VALUE (chain);
      field_decl = build_decl (FIELD_DECL, DECL_NAME (p),
			       TREE_TYPE (p));
      chainon (field_decl_chain, field_decl);
    }
  pop_from_top_level ();
  finish_struct (block_struct_type, field_decl_chain, NULL_TREE);
  return block_struct_type;
}

/** build_descriptor_block_decl -
  This routine builds a static block_descriptior variable of type:
  struct __block_descriptor; and initializes it to:
  {0,			// reserved
   sizeof(struct literal_block_n),
   copy_helper_block_1,		// only if block BLOCK_HAS_COPY_DISPOSE
   destroy_helper_block_1,	// only if block BLOCK_HAS_COPY_DISPOSE
  }
  Returns the new static VAR_DECL.  */
static tree
build_descriptor_block_decl (tree block_struct_type, struct block_sema_info *block_impl)
{
  extern tree create_tmp_var_raw (tree, const char *);
  static int desc_unique_count;
  int size;
  tree helper_addr, fields;
  tree decl, constructor, initlist;
  tree exp, bind;
  char name [32];
  tree descriptor_type =
    TREE_TYPE (build_block_descriptor_type (block_impl->BlockHasCopyDispose));

  sprintf (name, "__block_descriptor_tmp_%d", ++desc_unique_count);
  decl = create_tmp_var_raw (descriptor_type, name);
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_ARTIFICIAL (decl) = 1;

  /* Initialize "reserved" field to 0 for now.  */
  fields = TYPE_FIELDS (descriptor_type);
  initlist = build_tree_list (fields, build_int_cst (long_unsigned_type_node, 0));
  fields = TREE_CHAIN (fields);

  /* Initialize "Size" field.  */
  size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (block_struct_type));
  initlist = tree_cons (fields,
			build_int_cst (long_unsigned_type_node, size),
			initlist);

  if (block_impl->BlockHasCopyDispose)
    {
      /* Initialize "CopyFuncPtr" and "DestroyFuncPtr" fields.  */
      /* Helpers were previously generated completely as a nested
	 function (and context was required for code gen.)  But they are
	 not, so context must be set to NULL so initialization logic
	 does not complain.  */
      DECL_CONTEXT (block_impl->copy_helper_func_decl) = NULL_TREE;
      fields = TREE_CHAIN (fields);
      helper_addr = build_fold_addr_expr (block_impl->copy_helper_func_decl);
      helper_addr = convert (ptr_type_node, helper_addr);
      initlist = tree_cons (fields, helper_addr, initlist);

      DECL_CONTEXT (block_impl->destroy_helper_func_decl) = NULL_TREE;
      fields = TREE_CHAIN (fields);
      helper_addr = build_fold_addr_expr (block_impl->destroy_helper_func_decl);
      helper_addr = convert (ptr_type_node, helper_addr);
      initlist = tree_cons (fields, helper_addr, initlist);
    }

  /* The initializer was built in reverse; flip it, make it a constant
     static constructor, and emit the declaration.  */
  constructor = build_constructor_from_list (descriptor_type,
					     nreverse (initlist));
  TREE_CONSTANT (constructor) = 1;
  TREE_STATIC (constructor) = 1;
  TREE_READONLY (constructor) = 1;
  DECL_INITIAL (decl) = constructor;
  exp = build_stmt (DECL_EXPR, decl);
  bind = build3 (BIND_EXPR, void_type_node, decl, exp, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = 1;
  finish_decl (decl, constructor, NULL_TREE);
  return decl;
}

/** build_block_struct_initlist - builds the initializer list:
 { &_NSConcreteStackBlock or &_NSConcreteGlobalBlock  // __isa,
   BLOCK_USE_STRET | BLOCK_HAS_COPY_DISPOSE | BLOCK_IS_GLOBAL  // __flags,
   0,				// __reserved
   &helper_1,			// __FuncPtr,
   &static_descriptor_variable	// __descriptor,
   x,				// user variables.
   &y
   ...
 }
 The returned TREE_LIST is in REVERSE field order; the caller is
 expected to nreverse it before building the constructor.  */
static tree
build_block_struct_initlist (tree block_struct_type,
			     struct block_sema_info *block_impl)
{
  tree initlist, helper_addr;
  tree chain, fields;
  /* APPLE LOCAL radar 7735196 */
  unsigned int flags = 0;
  static tree NSConcreteStackBlock_decl = NULL_TREE;
  static tree NSConcreteGlobalBlock_decl = NULL_TREE;
  tree descriptor_block_decl = build_descriptor_block_decl (block_struct_type, block_impl);

  if (block_impl->BlockHasCopyDispose)
    /* Note!  Setting of this flag merely indicates to the runtime that
       we have destroy_helper_block/copy_helper_block helper
       routines.  */
    flags |= BLOCK_HAS_COPY_DISPOSE;
  /* APPLE LOCAL begin radar 7735196 */
  if (block_impl->return_type && aggregate_value_p(block_impl->return_type, 0))
    flags |= BLOCK_USE_STRET;
  /* APPLE LOCAL end 7735196 */

  fields = TYPE_FIELDS (block_struct_type);
  /* APPLE LOCAL begin radar 6230297 */
  /* A block that captures nothing (or appears outside any function) is
     a global block.  */
  if (!current_function_decl ||
      (block_impl->block_ref_decl_list == NULL_TREE &&
       block_impl->block_byref_decl_list == NULL_TREE))
  /* APPLE LOCAL end radar 6230297 */
    {
      /* This is a global block.  */
      /* Find an existing declaration for _NSConcreteGlobalBlock or declare
	 extern void *_NSConcreteGlobalBlock;  The decl is cached in a
	 function-static across calls.  */
      if (NSConcreteGlobalBlock_decl == NULL_TREE)
	{
	  tree name_id = get_identifier("_NSConcreteGlobalBlock");
	  NSConcreteGlobalBlock_decl = lookup_name (name_id);
	  if (!NSConcreteGlobalBlock_decl)
	    {
	      NSConcreteGlobalBlock_decl = build_decl (VAR_DECL, name_id,
						       ptr_type_node);
	      DECL_EXTERNAL (NSConcreteGlobalBlock_decl) = 1;
	      TREE_PUBLIC (NSConcreteGlobalBlock_decl) = 1;
	      pushdecl_top_level (NSConcreteGlobalBlock_decl);
	      rest_of_decl_compilation (NSConcreteGlobalBlock_decl, 0, 0);
	    }
	}
      /* APPLE LOCAL begin radar 6457359 */
      initlist = build_tree_list (fields,
				  convert (ptr_type_node,
					   build_fold_addr_expr (NSConcreteGlobalBlock_decl)));
      /* APPLE LOCAL end radar 6457359 */
      flags |= BLOCK_IS_GLOBAL;
    }
  else
    {
      /* Find an existing declaration for _NSConcreteStackBlock or declare
	 extern void *_NSConcreteStackBlock; */
      if (NSConcreteStackBlock_decl == NULL_TREE)
	{
	  tree name_id = get_identifier("_NSConcreteStackBlock");
	  NSConcreteStackBlock_decl = lookup_name (name_id);
	  if (!NSConcreteStackBlock_decl)
	    {
	      NSConcreteStackBlock_decl = build_decl (VAR_DECL, name_id,
						      ptr_type_node);
	      DECL_EXTERNAL (NSConcreteStackBlock_decl) = 1;
	      TREE_PUBLIC (NSConcreteStackBlock_decl) = 1;
	      pushdecl_top_level (NSConcreteStackBlock_decl);
	      rest_of_decl_compilation (NSConcreteStackBlock_decl, 0, 0);
	    }
	}
      /* APPLE LOCAL begin radar 6457359 */
      initlist = build_tree_list (fields,
				  convert (ptr_type_node,
					   build_fold_addr_expr (NSConcreteStackBlock_decl)));
      /* APPLE LOCAL end radar 6457359 */
    }
  fields = TREE_CHAIN (fields);

  /* __flags */
  initlist = tree_cons (fields,
			build_int_cst (integer_type_node, flags), initlist);
  fields = TREE_CHAIN (fields);

  /* __reserved */
  initlist = tree_cons (fields,
			build_int_cst (integer_type_node, 0), initlist);
  fields = TREE_CHAIN (fields);

  /* __FuncPtr */
  helper_addr = build_fold_addr_expr (block_impl->helper_func_decl);
  helper_addr = convert (ptr_type_node, helper_addr);
  initlist = tree_cons (fields, helper_addr, initlist);
  fields = TREE_CHAIN (fields);

  /* __descriptor */
  /* APPLE LOCAL begin radar 6457359 */
  initlist = tree_cons (fields,
			build_fold_addr_expr (descriptor_block_decl),
			initlist);
  /* APPLE LOCAL end radar 6457359 */

  /* Copied-in ("ref") variables: initialized from the original decls.  */
  for (chain = block_impl->block_original_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree y = TREE_VALUE (chain);
      TREE_USED (y) = 1;
      fields = TREE_CHAIN (fields);
      initlist = tree_cons (fields, y, initlist);
    }

  /* __block ("byref") variables.  */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree y = lookup_name (DECL_NAME (TREE_VALUE (chain)));
      tree forwarding_expr;
      gcc_assert (y);
      TREE_USED (y) = 1;
      if (COPYABLE_BYREF_LOCAL_VAR (y))
	{
	  /* For variables declared __block, either the original one at
	     the point of declaration or the imported version (which is
	     initialized in the helper function's prologue) is used to
	     initialize the byref variable field in the temporary.  */
	  if (TREE_CODE (TREE_TYPE (y)) != RECORD_TYPE)
	    y = build_indirect_ref (y, "unary *");
	  /* We will be using the __block_struct_variable.__forwarding as
	     the initializer.  */
	  forwarding_expr = build_component_ref (y, get_identifier ("__forwarding"));
	}
      else
	/* Global variable is always assumed passed by its address.  */
	forwarding_expr = build_fold_addr_expr (y);
      fields = TREE_CHAIN (fields);
      initlist = tree_cons (fields, forwarding_expr, initlist);
    }
  return initlist;
}

/** build_block_literal_tmp - This routine:
 1) builds block type:
 struct __block_literal_n {
  void *__isa;	// initialized to &_NSConcreteStackBlock or &_NSConcreteGlobalBlock
  int __flags;
  int __reserved;
  void *__FuncPtr;
  struct __block_descriptor {
    unsigned long int reserved;	// NULL
    unsigned long int Size;	// sizeof(struct Block_literal_1)
    // optional helper functions
    void *CopyFuncPtr;		// When BLOCK_HAS_COPY_DISPOSE
    void *DestroyFuncPtr;	// When BLOCK_HAS_COPY_DISPOSE
  } *__descriptor;
  // imported variables
  int x;	// ref variable list ...
  int *y;	// byref variable list
 };

 2) build function prototype:
 double helper_1(struct __block_literal_n *ii, int z);

 3) build the temporary initialization:
 struct __block_literal_n I = {
   &_NSConcreteStackBlock or &_NSConcreteGlobalBlock  // __isa,
   BLOCK_USE_STRET | BLOCK_HAS_COPY_DISPOSE | BLOCK_IS_GLOBAL  // __flags,
   0,		// __reserved
   &helper_1,	// __FuncPtr
   &static_descriptor_variable	// __descriptor,
   x,		// user variables.
   &y
   ...
 };
 It returns the temporary.  */
static tree
build_block_literal_tmp (const char *name,
			 struct block_sema_info * block_impl)
{
  extern tree create_tmp_var_raw (tree, const char *);
  tree block_holder_tmp_decl;
  tree constructor, initlist;
  tree exp, bind;
  tree block_struct_type = TREE_TYPE (block_impl->block_arg_ptr_type);
  /* APPLE LOCAL begin radar 6230297 */
  bool staticBlockTmp = (block_impl->block_ref_decl_list == NULL_TREE &&
			 block_impl->block_byref_decl_list == NULL_TREE);

  block_holder_tmp_decl = create_tmp_var_raw (block_struct_type, name);
  /* Context will not be known until when the literal is synthesized.
     This is more so in the case of nested block literal blocks.  */
  DECL_CONTEXT (block_holder_tmp_decl) = staticBlockTmp ? NULL_TREE
						       : current_function_decl;
  /* In the new ABI, helper function decl. is the initializer for the
     descriptor variable which is always declared static.  So, it must
     have no context; otherwise, gcc thinks that it requires trampoline!
     when address of this function is used as initializer.  */
  DECL_CONTEXT (block_impl->helper_func_decl) = NULL_TREE;
  /* APPLE LOCAL end radar 6230297 */
  DECL_ARTIFICIAL (block_holder_tmp_decl) = 1;

  initlist = build_block_struct_initlist (block_struct_type, block_impl);
  initlist = nreverse (initlist);
  constructor = build_constructor_from_list (block_struct_type, initlist);
  TREE_CONSTANT (constructor) = 1;
  TREE_STATIC (constructor) = 1;
  TREE_READONLY (constructor) = 1;
  DECL_INITIAL (block_holder_tmp_decl) = constructor;
  exp = build_stmt (DECL_EXPR, block_holder_tmp_decl);
  bind = build3 (BIND_EXPR, void_type_node, block_holder_tmp_decl, exp, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* Temporary representing a global block is made global static.  */
  /* APPLE LOCAL radar 6230297 */
  if (staticBlockTmp || global_bindings_p ())
    {
      TREE_PUBLIC (block_holder_tmp_decl) = 0;
      TREE_STATIC (block_holder_tmp_decl) = 1;
      finish_decl (block_holder_tmp_decl, constructor, NULL_TREE);
    }
  return block_holder_tmp_decl;
}
/* APPLE LOCAL end radar 5847213 - radar 6329245 */

/* Abandon the block currently being compiled: pop the function context,
   release the block's bookkeeping, and return error_mark_node so the
   caller can propagate the failure.  */
static tree
clean_and_exit (tree block)
{
  pop_function_context ();
  free (finish_block (block));
  return error_mark_node;
}

/** synth_copy_helper_block_func - This function synthesizes
  void copy_helper_block (struct block* _dest, struct block *_src)
  function, emitting a _Block_object_assign call for each captured
  object/block pointer and each __block variable.  */
static void
synth_copy_helper_block_func (struct block_sema_info * block_impl)
{
  tree stmt, chain, fnbody;
  tree dst_arg, src_arg;
  struct c_arg_info * arg_info;

  /* Set up: (struct block* _dest, struct block *_src) parameters.  */
  dst_arg = build_decl (PARM_DECL, get_identifier ("_dst"),
			block_impl->block_arg_ptr_type);
  DECL_CONTEXT (dst_arg) = cur_block->copy_helper_func_decl;
  TREE_USED (dst_arg) = 1;
  DECL_ARG_TYPE (dst_arg) = block_impl->block_arg_ptr_type;
  src_arg = build_decl (PARM_DECL, get_identifier ("_src"),
			block_impl->block_arg_ptr_type);
  /* APPLE LOCAL radar 5847213 */
  DECL_CONTEXT (src_arg) = cur_block->copy_helper_func_decl;
  TREE_USED (src_arg) = 1;
  DECL_ARG_TYPE (src_arg) = block_impl->block_arg_ptr_type;
  arg_info = xcalloc (1, sizeof (struct c_arg_info));
  TREE_CHAIN (dst_arg) = src_arg;
  arg_info->parms = dst_arg;
  arg_info->types = tree_cons (NULL_TREE, block_impl->block_arg_ptr_type,
			       tree_cons (NULL_TREE,
					  block_impl->block_arg_ptr_type,
					  NULL_TREE));

  /* Function header synthesis.  */
  push_function_context ();
  start_block_helper_function (cur_block->copy_helper_func_decl);
  store_parm_decls_from (arg_info);

  /* Body of the function.  */
  stmt = c_begin_compound_stmt (true);
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (block_requires_copying (TREE_VALUE (chain)))
      {
	/* APPLE LOCAL begin radar 6175959 */
	int flag;
	tree call_exp;
	tree p = TREE_VALUE (chain);
	tree dst_block_component, src_block_component;
	dst_block_component = build_component_ref (build_indirect_ref (dst_arg, "->"),
						   DECL_NAME (p));
	src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
						   DECL_NAME (p));

	if (TREE_CODE (TREE_TYPE (p)) == BLOCK_POINTER_TYPE)
	  /* _Block_object_assign(&_dest->myImportedBlock, _src->myImportedClosure, BLOCK_FIELD_IS_BLOCK) */
	  flag = BLOCK_FIELD_IS_BLOCK;
	else
	  /* _Block_object_assign(&_dest->myImportedBlock, _src->myImportedClosure, BLOCK_FIELD_IS_OBJECT) */
	  flag = BLOCK_FIELD_IS_OBJECT;
	dst_block_component = build_fold_addr_expr (dst_block_component);
	call_exp = build_block_object_assign_call_exp (dst_block_component, src_block_component, flag);
	add_stmt (call_exp);
	/* APPLE LOCAL end radar 6175959 */
      }

  /* For each __block declared variable must generate call to:
     _Block_object_assign(&_dest->myImportedBlock, _src->myImportedBlock,
			  BLOCK_FIELD_IS_BYREF [|BLOCK_FIELD_IS_WEAK])  */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
	int flag = BLOCK_FIELD_IS_BYREF;
	tree call_exp;
	tree p = TREE_VALUE (chain);
	tree dst_block_component, src_block_component;
	dst_block_component = build_component_ref (build_indirect_ref (dst_arg, "->"),
						   DECL_NAME (p));
	src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
						   DECL_NAME (p));

	/* _Block_object_assign(&_dest->myImportedClosure, _src->myImportedClosure,
			        BLOCK_FIELD_IS_BYREF [|BLOCK_FIELD_IS_WEAK]) */
	if (COPYABLE_WEAK_BLOCK (p))
	  flag |= BLOCK_FIELD_IS_WEAK;
	dst_block_component = build_fold_addr_expr (dst_block_component);
	call_exp = build_block_object_assign_call_exp (dst_block_component, src_block_component, flag);
	add_stmt (call_exp);
      }

  fnbody = c_end_compound_stmt (stmt, true);
  add_stmt (fnbody);
  finish_function ();
  pop_function_context ();
  free (arg_info);
}

/* Synthesize void destroy_helper_block (struct block *_src): the
   dispose counterpart of the copy helper above, emitting a
   _Block_object_dispose call for each captured object/block pointer and
   each __block variable.  */
static void
synth_destroy_helper_block_func (struct block_sema_info * block_impl)
{
  tree stmt, chain, fnbody;
  tree src_arg;
  struct c_arg_info * arg_info;

  /* Set up: (struct block *_src) parameter.  */
  src_arg = build_decl (PARM_DECL, get_identifier ("_src"),
			block_impl->block_arg_ptr_type);
  TREE_USED (src_arg) = 1;
  DECL_ARG_TYPE (src_arg) = block_impl->block_arg_ptr_type;
  arg_info = xcalloc (1, sizeof (struct c_arg_info));
  arg_info->parms = src_arg;
  arg_info->types = tree_cons (NULL_TREE, block_impl->block_arg_ptr_type,
			       NULL_TREE);

  /* Function header synthesis.  */
  push_function_context ();
  start_block_helper_function (cur_block->destroy_helper_func_decl);
  store_parm_decls_from (arg_info);

  /* Body of the function.  */
  stmt = c_begin_compound_stmt (true);
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (block_requires_copying (TREE_VALUE (chain)))
      {
	int flag;
	tree rel_exp;
	tree p = TREE_VALUE (chain);
	tree src_block_component;
	src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
						   DECL_NAME (p));

	if (TREE_CODE (TREE_TYPE (p)) == BLOCK_POINTER_TYPE)
	  /* _Block_object_dispose(_src->imported_object_0, BLOCK_FIELD_IS_BLOCK); */
	  flag = BLOCK_FIELD_IS_BLOCK;
	else
	  /* _Block_object_dispose(_src->imported_object_0, BLOCK_FIELD_IS_OBJECT); */
	  flag = BLOCK_FIELD_IS_OBJECT;
	rel_exp = build_block_object_dispose_call_exp (src_block_component, flag);
	add_stmt (rel_exp);
      }

  /* For each __block declared variable must generate call to:
     _Block_object_dispose(_src->myImportedClosure,
			   BLOCK_FIELD_IS_BYREF[|BLOCK_FIELD_IS_WEAK])  */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
	tree call_exp;
	int flag = BLOCK_FIELD_IS_BYREF;
	tree p = TREE_VALUE (chain);
	tree src_block_component;
	src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
						   DECL_NAME (p));

	if (COPYABLE_WEAK_BLOCK (p))
	  flag |= BLOCK_FIELD_IS_WEAK;
	/* _Block_object_dispose(_src->myImportedClosure,
				 BLOCK_FIELD_IS_BYREF[|BLOCK_FIELD_IS_WEAK]) */
	call_exp = build_block_object_dispose_call_exp (src_block_component, flag);
	add_stmt (call_exp);
      }

  fnbody = c_end_compound_stmt (stmt, true);
  add_stmt (fnbody);
  finish_function ();
  pop_function_context ();
  free (arg_info);
}

/* Parse a block-id.

   GNU Extension:

   block-id:
     specifier-qualifier-list block-declarator

   Returns the DECL specified or implied.
*/
/* Parse a block-id (the declarator form of a block literal's type).

   Returns the DECL produced by grokblockdecl, or NULL on a parse
   error (the caller treats NULL/error_mark_node as "no usable type").  */
static tree
c_parser_block_id (c_parser* parser)
{
  struct c_declspecs *specs = build_null_declspecs ();
  struct c_declarator *declarator;
  bool dummy = false;
  c_parser_declspecs (parser, specs, false, true, true);
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  pending_xref_error ();
  finish_declspecs (specs);
  declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_BLOCK, &dummy);
  if (declarator == NULL)
    return NULL;
  return grokblockdecl (specs, declarator);
}

/* Parse a block-literal-expr.

   GNU Extension:

   block-literal-expr:
     ^ parameter-declaration-clause exception-specification [opt] compound-statement
     ^ block-id compound-statement

   It synthesizes the helper function for later generation and builds
   the necessary data to represent the block literal where it is
   declared.  */
static tree
c_parser_block_literal_expr (c_parser* parser)
{
  char name [32];
  /* Counter used to give each block's synthesized helpers a unique name
     within this translation unit.  */
  static int global_unique_count;
  int unique_count = ++global_unique_count;
  tree block_helper_function_decl;
  tree expr, body, type, arglist = void_list_node, ftype;
  tree self_arg, stmt;
  struct c_arg_info *args = NULL;
  tree arg_type = void_list_node;
  struct block_sema_info *block_impl;
  tree tmp;
  bool open_paren_seen = false;
  tree restype;
  tree fnbody, typelist;
  tree helper_function_type;
  tree block;
  /* APPLE LOCAL radar 6185344 */
  tree declared_block_return_type = NULL_TREE;
  /* APPLE LOCAL radar 6237713 */
  tree attributes = NULL_TREE;

  c_parser_consume_token (parser); /* eat '^' */
  /* APPLE LOCAL begin radar 6237713 */
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    attributes = c_parser_attributes (parser);
  /* APPLE LOCAL end radar 6237713 */

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Parse the optional argument list */
      c_parser_consume_token (parser);
      /* Open the scope to collect parameter decls */
      push_scope ();
      args = c_parser_parms_declarator (parser, true, NULL_TREE);
      /* Check for args as it might be NULL due to error. */
      if (args)
        {
          arglist = args->parms;
          arg_type = args->types;
        }
      else
        {
          pop_scope ();
          return error_mark_node;
        }
      open_paren_seen = true;
      pop_scope ();
    }
  else if (c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
    {
      /* Parse user declared return type. */
      tree decl;
      /* APPLE LOCAL begin radar 6237713 */
      if (attributes)
        {
          warning (0, "attributes before block type are ignored");
          attributes = NULL_TREE;
        }
      /* APPLE LOCAL end radar 6237713 */
      decl = c_parser_block_id (parser);
      if (decl && decl != error_mark_node)
        {
          arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl));
          arglist = DECL_ARGUMENTS (decl);
          declared_block_return_type = TREE_TYPE (TREE_TYPE (decl));
        }
    }

  block = begin_block ();
  cur_block->arg_info = NULL;
  if (declared_block_return_type)
    {
      cur_block->return_type = TYPE_MAIN_VARIANT (declared_block_return_type);
      cur_block->block_has_return_type = true;
    }
  else
    cur_block->return_type = NULL_TREE;

  if (args)
    cur_block->arg_info = args;
  else
    cur_block->arg_info = xcalloc (1, sizeof (struct c_arg_info));

  if (declared_block_return_type)
    {
      cur_block->arg_info->parms = arglist;
      cur_block->arg_info->types = arg_type;
    }

  /* Must also build hidden parameter .block_descriptor added to the helper
     function, even though we do not know its type yet. */
  /* APPLE LOCAL radar 6404979 */
  self_arg = build_decl (PARM_DECL, get_identifier (".block_descriptor"),
                         ptr_type_node);
  TREE_USED (self_arg) = 1;  /* Prevent unused parameter '.block_descriptor' warning. */
  TREE_CHAIN (self_arg) = cur_block->arg_info->parms;
  cur_block->arg_info->types = tree_cons (NULL_TREE, ptr_type_node, arg_type);
  cur_block->arg_info->parms = self_arg;

  /* APPLE LOCAL begin radar 6185344 */
  /* Build the declaration of the helper function (if we do not know its
     result type yet, assume it is 'void'. If user provided it, use it).
     Treat this as a nested function and use nested function infrastructure
     for its generation. */
  ftype = build_function_type ((!cur_block->block_has_return_type ?
                                void_type_node : cur_block->return_type),
                               cur_block->arg_info->types);
  /* APPLE LOCAL end radar 6185344 */
  /* APPLE LOCAL radar 6160536 - radar 6411649 */
  block_helper_function_decl = build_helper_func_decl (build_block_helper_name (0),
                                                       ftype);
  DECL_CONTEXT (block_helper_function_decl) = current_function_decl;
  cur_block->helper_func_decl = block_helper_function_decl;

  push_function_context ();
  start_block_helper_function (cur_block->helper_func_decl);
  /* Set block's scope to the scope of the helper function's main body.
     This is primarily used when nested blocks are declared. */
  /* FIXME: Name of objc_get_current_scope needs to get changed. */
  cur_block->the_scope = (struct c_scope*)objc_get_current_scope ();
  /* Enter parameter list to the scope of the helper function. */
  store_parm_decls_from (cur_block->arg_info);
  /* APPLE LOCAL begin radar 6237713 */
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    attributes = c_parser_attributes (parser);
  /* APPLE LOCAL radar 6246527 */
  any_recognized_block_attribute (attributes);
  decl_attributes (&cur_block->helper_func_decl, attributes, 0);
  /* APPLE LOCAL end radar 6237713 */

  /* Start parsing body or expression part of the block literal. */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      tree save_c_break_label = c_break_label;
      tree save_c_cont_label = c_cont_label;
      /* Indicate no valid break/continue context by setting these variables
         to some non-null, non-label value.  We'll notice and emit the proper
         error message in c_finish_bc_stmt.  */
      c_break_label = c_cont_label = size_zero_node;
      c_parser_consume_token (parser); /* Consume '{'. */
      stmt = c_begin_compound_stmt (true);
      c_parser_compound_statement_nostart (parser);
      c_cont_label = save_c_cont_label;
      c_break_label = save_c_break_label;
    }
  else
    {
      /* Expression-bodied block literal ("^ expr"): diagnosed as an error,
         but the expression is still parsed to keep error recovery going.  */
      struct c_expr expr;
      stmt = c_begin_compound_stmt (true);
      error ("blocks require { }");
      expr = c_parser_cast_expression (parser, NULL);
      body = expr.value;
      if (body == error_mark_node)
        return clean_and_exit (block);
      if (cur_block->return_type)
        {
          error ("return not allowed in block expression literal");
          return clean_and_exit (block);
        }
      else if (!open_paren_seen)
        {
          error ("argument list is required for block expression literals");
          return clean_and_exit (block);
        }
      else
        {
          /* The helper's result type is inferred from the expression.  */
          tree restype = TYPE_MAIN_VARIANT (TREE_TYPE (body));
          add_stmt (body);
          TREE_TYPE (current_function_decl) =
            build_function_type (restype,
                                 TYPE_ARG_TYPES (TREE_TYPE (current_function_decl)));
          TREE_TYPE (DECL_RESULT (current_function_decl)) = restype;
          relayout_decl (DECL_RESULT (current_function_decl));
          cur_block->return_type = restype;
        }
    }

  cur_block->block_arg_ptr_type =
    build_pointer_type (build_block_struct_type (cur_block));

  restype = !cur_block->return_type ? void_type_node : cur_block->return_type;
  if (restype == error_mark_node)
    return clean_and_exit (block);

  /* Now that we know type of the hidden .block_descriptor argument, fix its
     type. */
  TREE_TYPE (self_arg) = cur_block->block_arg_ptr_type;
  DECL_ARG_TYPE (self_arg) = cur_block->block_arg_ptr_type;

  /* The DECL_RESULT should already have the correct type by now. */
  gcc_assert (TREE_TYPE (DECL_RESULT (current_function_decl)) == restype);

  cur_block->block_body = stmt;
  block_build_prologue (cur_block);

  fnbody = c_end_compound_stmt (stmt, true);
  add_stmt (fnbody);

  /* We are done parsing of the block body. Return type of block is now known.
     We also know all we need to know about the helper function. So, fix its
     type here. */
  /* We moved this here because for global blocks, helper function body is
     not nested and is gimplified in call to finish_function() and return type
     of the function must be correct. */
  ftype = build_function_type (restype, arg_type);
  /* Declare helper function; as in:
     double helper_1(struct block_1 *ii, int z); */
  typelist = TYPE_ARG_TYPES (ftype);
  /* (struct block_1 *ii, int z, ...) */
  typelist = tree_cons (NULL_TREE, cur_block->block_arg_ptr_type,
                        typelist);
  helper_function_type = build_function_type (TREE_TYPE (ftype), typelist);
  TREE_TYPE (cur_block->helper_func_decl) = helper_function_type;

  finish_function ();
  pop_function_context ();

  /* Build the declaration for copy_helper_block and destroy_helper_block
     helper functions for later use. */
  if (cur_block->BlockHasCopyDispose)
    {
      /* void copy_helper_block (struct block*, struct block *); */
      tree s_ftype =
        build_function_type (void_type_node,
                             tree_cons (NULL_TREE, cur_block->block_arg_ptr_type,
                                        tree_cons (NULL_TREE,
                                                   cur_block->block_arg_ptr_type,
                                                   void_list_node)));
      sprintf (name, "__copy_helper_block_%d", unique_count);
      cur_block->copy_helper_func_decl =
        build_helper_func_decl (get_identifier (name), s_ftype);
      synth_copy_helper_block_func (cur_block);

      /* void destroy_helper_block (struct block*); */
      s_ftype = build_function_type (void_type_node,
                                     tree_cons (NULL_TREE,
                                                cur_block->block_arg_ptr_type,
                                                void_list_node));
      sprintf (name, "__destroy_helper_block_%d", unique_count);
      cur_block->destroy_helper_func_decl =
        build_helper_func_decl (get_identifier (name), s_ftype);
      synth_destroy_helper_block_func (cur_block);
    }

  block_impl = finish_block (block);

  /* Build unique name of the temporary used in code gen. */
  sprintf (name, "__block_holder_tmp_%d", unique_count);
  tmp = build_block_literal_tmp (name, block_impl);
  tmp = build_fold_addr_expr (tmp);
  type = build_block_pointer_type (ftype);
  expr = convert (type, convert (ptr_type_node, tmp));
  free (block_impl);
  return expr;
}
/* APPLE LOCAL end radar 5732232 - blocks (C++ ce) */

#include "gt-c-parser.h"
MINDSSCbox.h
void boxfilter(float *input, float *temp1, float *temp2, int hw, int m, int n, int o) { int sz = m * n * o; for (int i = 0; i < sz; i++) { temp1[i] = input[i]; } for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 1; i < m; i++) { temp1[i + j * m + k * m * n] += temp1[(i - 1) + j * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < (hw + 1); i++) { temp2[i + j * m + k * m * n] = temp1[(i + hw) + j * m + k * m * n]; } for (int i = (hw + 1); i < (m - hw); i++) { temp2[i + j * m + k * m * n] = temp1[(i + hw) + j * m + k * m * n] - temp1[(i - hw - 1) + j * m + k * m * n]; } for (int i = (m - hw); i < m; i++) { temp2[i + j * m + k * m * n] = temp1[(m - 1) + j * m + k * m * n] - temp1[(i - hw - 1) + j * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int j = 1; j < n; j++) { for (int i = 0; i < m; i++) { temp2[i + j * m + k * m * n] += temp2[i + (j - 1) * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int i = 0; i < m; i++) { for (int j = 0; j < (hw + 1); j++) { temp1[i + j * m + k * m * n] = temp2[i + (j + hw) * m + k * m * n]; } for (int j = (hw + 1); j < (n - hw); j++) { temp1[i + j * m + k * m * n] = temp2[i + (j + hw) * m + k * m * n] - temp2[i + (j - hw - 1) * m + k * m * n]; } for (int j = (n - hw); j < n; j++) { temp1[i + j * m + k * m * n] = temp2[i + (n - 1) * m + k * m * n] - temp2[i + (j - hw - 1) * m + k * m * n]; } } } for (int k = 1; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { temp1[i + j * m + k * m * n] += temp1[i + j * m + (k - 1) * m * n]; } } } for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { for (int k = 0; k < (hw + 1); k++) { input[i + j * m + k * m * n] = temp1[i + j * m + (k + hw) * m * n]; } for (int k = (hw + 1); k < (o - hw); k++) { input[i + j * m + k * m * n] = temp1[i + j * m + (k + hw) * m * n] - temp1[i + j * m + (k - hw - 1) * m * n]; } for (int k = (o - hw); k < o; k++) { input[i + j * m + k * m * n] = 
temp1[i + j * m + (o - 1) * m * n] - temp1[i + j * m + (k - hw - 1) * m * n]; } } } } void imshift(float *input, float *output, int dx, int dy, int dz, int m, int n, int o) { for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { if (i + dy >= 0 && i + dy < m && j + dx >= 0 && j + dx < n && k + dz >= 0 && k + dz < o) output[i + j * m + k * m * n] = input[i + dy + (j + dx) * m + (k + dz) * m * n]; else output[i + j * m + k * m * n] = input[i + j * m + k * m * n]; } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ void distances(float *im1, float *d1, int m, int n, int o, int qs, int l) { int sz1 = m * n * o; float *w1 = new float[sz1]; int len1 = 6; float *temp1 = new float[sz1]; float *temp2 = new float[sz1]; int dx[6] = {+qs, +qs, -qs, +0, +qs, +0}; int dy[6] = {+qs, -qs, +0, -qs, +0, +qs}; int dz[6] = {0, +0, +qs, +qs, +qs, +qs}; imshift(im1, w1, dx[l], dy[l], dz[l], m, n, o); for (int i = 0; i < sz1; i++) { w1[i] = (w1[i] - im1[i]) * (w1[i] - im1[i]); } boxfilter(w1, temp1, temp2, qs, m, n, o); for (int i = 0; i < sz1; i++) { d1[i + l * sz1] = w1[i]; } delete temp1; delete temp2; delete w1; } //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t *mindq, float *im1, int m, int n, int o, int qs) { // MIND with self-similarity context int dx[6] = {+qs, +qs, -qs, +0, +qs, +0}; int dy[6] = {+qs, -qs, +0, -qs, +0, +qs}; int dz[6] = {0, +0, +qs, +qs, +qs, +qs}; int sx[12] = {-qs, +0, -qs, +0, +0, +qs, +0, +0, +0, -qs, +0, +0}; int sy[12] = {+0, -qs, +0, +qs, +0, +0, +0, +qs, +0, +0, +0, -qs}; int sz[12] = {+0, +0, +0, +0, -qs, +0, -qs, +0, -qs, +0, -qs, +0}; int index[12] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5}; float sigma = 0.75; // 1.0;//0.75;//1.5; int rho = ceil(sigma * 1.5) * 2 + 1; int 
len1 = 6; const int len2 = 12; image_d = 12; int d = 12; int sz1 = m * n * o; //============== DISTANCES USING BOXFILTER =================== float *d1 = new float[sz1 * len1]; auto time1 = chrono::steady_clock::now(); #pragma omp parallel for for (int l = 0; l < len1; l++) { distances(im1, d1, m, n, o, qs, l); } auto time2 = chrono::steady_clock::now(); float timeMIND1 = chrono::duration_cast<chrono::duration<float>>(time2 - time1).count(); time1 = chrono::steady_clock::now(); // quantisation table const int val = 6; const uint64_t power = 32; #pragma omp parallel for for (int k = 0; k < o; k++) { unsigned int tablei[6] = {0, 1, 3, 7, 15, 31}; float compare[val - 1]; for (int i = 0; i < val - 1; i++) { compare[i] = -log((i + 1.5f) / val); } float mind1[12]; for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { for (int l = 0; l < len2; l++) { if (i + sy[l] >= 0 && i + sy[l] < m && j + sx[l] >= 0 && j + sx[l] < n && k + sz[l] >= 0 && k + sz[l] < o) { mind1[l] = d1[i + sy[l] + (j + sx[l]) * m + (k + sz[l]) * m * n + index[l] * sz1]; } else { mind1[l] = d1[i + j * m + k * m * n + index[l] * sz1]; } } float minval = *min_element(mind1, mind1 + len2); float sumnoise = 0.0f; for (int l = 0; l < len2; l++) { mind1[l] -= minval; sumnoise += mind1[l]; } float noise1 = max(sumnoise / (float)len2, 1e-6f); for (int l = 0; l < len2; l++) { mind1[l] /= noise1; } uint64_t accum = 0; uint64_t tabled1 = 1; for (int l = 0; l < len2; l++) { // mind1[l]=exp(-mind1[l]); int mind1val = 0; for (int c = 0; c < val - 1; c++) { mind1val += compare[c] > mind1[l] ? 1 : 0; } // int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum += tablei[mind1val] * tabled1; tabled1 *= power; } mindq[i + j * m + k * m * n] = accum; } } } time2 = chrono::steady_clock::now(); float timeMIND2 = chrono::duration_cast<chrono::duration<float>>(time2 - time1).count(); delete d1; }
DRB092-threadprivatemissing2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A file-scope variable used within a function called by a parallel region. No threadprivate is used to avoid data races. This is the case for a variable referenced within a construct. Data race pairs sum0@68:7 vs. sum0@68:12 sum0@68:7 vs. sum0@68:7 */ #include <stdio.h> #include <assert.h> int sum0=0, sum1=0; //#pragma omp threadprivate(sum0) int main() { int i, sum=0; { #pragma omp parallel for private(i ) reduction(+:sum0) for (i=1;i<=1000;i++) { sum0=sum0+i; } } sum= sum+sum0; /* reference calculation */ #pragma omp parallel for private(i ) reduction(+:sum1) for (i=1;i<=1000;i++) { sum1=sum1+i; } printf("sum=%d; sum1=%d\n",sum,sum1); // assert(sum==sum1); return 0; }
GB_binop__rminus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint64) // A*D function (colscale): GB (_AxD__rminus_uint64) // D*A function (rowscale): GB (_DxB__rminus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint64) // C=scalar+B GB (_bind1st__rminus_uint64) // C=scalar+B' GB (_bind1st_tran__rminus_uint64) // C=A+scalar GB (_bind2nd__rminus_uint64) // C=A'+scalar GB (_bind2nd_tran__rminus_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if 
the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE: auto-generated code (built from Generator/GB_binop.c) -- per the file
// header, do not hand-edit; change the generator instead.  The operator here
// is RMINUS on uint64_t: z = y - x (operand order flipped w.r.t. MINUS).
// Function bodies are supplied by the #include'd template files.

void GB (_Cdense_ewise3_accum__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // slicing of B's entries across tasks
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable duplicate return, kept verbatim (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = Bx [p] ;
        // rminus bind1st: Cx [p] = bij - x
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        // rminus bind2nd: Cx [p] = y - aij
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (aij - x) ;           \
}

GrB_Info GB (_bind1st_tran__rminus_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (y - aij) ;           \
}

GrB_Info GB (_bind2nd_tran__rminus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
diagsm_x_csr_n_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_Number diag[A->rows]; memset(diag, '\0', A->rows * sizeof(ALPHA_Number)); int num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT r = 0; r < A->rows; r++) { for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++) { ALPHA_INT ac = A->col_indx[ai]; if (ac == r) { diag[r] = A->values[ai]; } } } #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT r = 0; r < A->rows; ++r) { for (ALPHA_INT c = 0; c < columns; ++c) { ALPHA_Number t; alpha_setzero(t); alpha_mul(t, alpha, x[index2(r, c, ldx)]); alpha_div(y[index2(r, c, ldy)], t, diag[r]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
q1.c
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <omp.h> //static long int num_steps = 10000000; double f(double x) { double ans = 4.0/(1+x*x); return(ans); } int main() { int j; for(j=3;j<=8;j++){ long int num_steps = pow(10,j); double dx = 1.0/(double)num_steps; double sum = 0; double net_start = omp_get_wtime(); #pragma omp serial { int id = omp_get_thread_num(); double thread_start=omp_get_wtime(); int i; double partial = 0; for(i=0;i<num_steps;i++) { double x = (i+0.5)*dx; partial+=f(x); } partial*=dx; double thread_elapsed = -1*(thread_start-omp_get_wtime()); #pragma omp critical sum+=partial; } double net_elapsed = omp_get_wtime() - net_start; double pi = sum; printf("pi %0.9lf net_elapsed %lf\n",pi,net_elapsed); } }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

// NOTE(review): this is a locally modified copy of Eigen's GEMM header —
// Android logging and an experimental std::future-based threading path
// (THREAD_OPT) were added on top of upstream.  Confirm these fork-local
// changes are intentional before syncing with upstream Eigen.
#include <android/log.h>
#define LOG_TAG "GENEARLMATRIDXMATRIX"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)

#define THREAD_OPT
#ifdef THREAD_OPT
#include <future>
#include <vector>
#endif

// NOTE(review): `using namespace std;` in a widely-included header pollutes
// the namespace of every translation unit that includes it — flagged for
// removal once the THREAD_OPT experiment is resolved.
using namespace std;

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  // Row-major C = A*B is computed as column-major C^T = B^T * A^T, so all the
  // real work lives in the ColMajor specialization below.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Goto-style blocked GEMM: panels of lhs/rhs are packed into cache-sized
// buffers (blockA/blockB) and multiplied by the gebp micro-kernel.
// When `info` is non-null this is called from inside an OpenMP parallel
// region and the threads cooperatively pack A (see below).
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // LOGI("frank info");
    // this is the parallel version!
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();
    // LOGI("frank tid %d, threads %d",tid, threads);

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      // NOTE(review): busy-wait on info[tid].users with no atomic read —
      // presumably GemmParallelInfo declares these fields volatile/atomic
      // upstream; confirm, otherwise this is a data race.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#pragma omp atomic
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);
    // LOGI("frank eigen start");
    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
    // NOTE(review): `vec` and the commented-out std::async dispatch below are
    // remnants of the THREAD_OPT experiment; the vector is never used.
    vector<shared_future<void>> vec;
    // printf("rows %d, cols %d, mc %d, kc %d, nc %d depth %d\n", rows, cols, mc, kc, nc, depth);
    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // vec.emplace_back(async(launch::async, [&]{
          //   gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          // }));
          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
    // for (auto it:vec) {
    //   it.wait();
    // }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor object handed to parallelize_gemm: captures the operands, the
// destination, alpha, and the blocking policy, and forwards a (row,col)
// sub-problem to Gemm::run.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)  // default to the whole rhs width
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the packed-panel buffers and the (mc, nc, kc) cache
// blocking sizes shared by both gemm_blocking_space specializations.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fixed-size specialization: everything is known at compile time, so the
// pack buffers live inline in the object (no heap allocation at all).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Over-allocate raw bytes and align the pointers manually in the ctor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic-size specialization: blocking sizes are computed at run time and
// the pack buffers are lazily heap-allocated (allocateA/allocateB).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// High-level product evaluator: small products fall back to the lazy
// coefficient-based path, large ones go through the blocked GEMM above.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // Heuristic: tiny products are cheaper via the lazy evaluator.
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs, dispatching to the blocked GEMM kernel with a
  // blocking policy chosen from the destination's compile-time sizes.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // Fold any scalar factors wrapped around the operands into alpha.
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
diffusion_grid.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef DIFFUSION_GRID_H_
#define DIFFUSION_GRID_H_

#include <assert.h>

#include <Rtypes.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "math_util.h"
#include "param.h"

namespace bdm {

/// A class that computes the diffusion of extracellular substances
/// It maintains the concentration and gradient of a single substance
class DiffusionGrid {
 public:
  explicit DiffusionGrid(TRootIOCtor* p) {}
  /// @param substance_id   numeric id of the substance this grid tracks
  /// @param substance_name human-readable substance name
  /// @param dc  diffusion coefficient; expanded into the 7-point stencil
  ///            weights {center, west, east, south, north, bottom, top}
  /// @param mu  decay constant
  /// @param resolution number of boxes along each axis
  DiffusionGrid(int substance_id, std::string substance_name, double dc,
                double mu, int resolution = 10)
      : substance_(substance_id),
        substance_name_(substance_name),
        dc_({{1 - dc, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6}}),
        mu_(mu),
        resolution_(resolution) {}

  virtual ~DiffusionGrid() {}

  /// @brief      Initializes the grid by calculating the grid dimensions
  ///             and number of boxes along the axis from the input arguments
  ///
  /// @param[in]  grid_dimensions  The grid dimensions
  ///                              (box length is derived from them and
  ///                              resolution_; it is not a parameter)
  ///
  void Initialize(const std::array<int32_t, 6>& grid_dimensions) {
    // Get grid properties from neighbor grid
    grid_dimensions_ = grid_dimensions;

    assert(resolution_ > 0 && "The resolution cannot be zero!");
    num_boxes_axis_[0] = resolution_;
    num_boxes_axis_[1] = resolution_;
    num_boxes_axis_[2] = resolution_;

    // NOTE(review): assumes a cubic domain — only the x extent
    // [grid_dimensions_[0], grid_dimensions_[1]] is used for box_length_.
    box_length_ = (grid_dimensions_[1] - grid_dimensions_[0]) /
                  static_cast<double>(resolution_);
    box_volume_ = box_length_ * box_length_ * box_length_;

    assert(box_length_ > 0 &&
           "Box length of diffusion grid must be greater than zero!");

    // Set the parity of the number of boxes along the dimensions (since all
    // dimensions are the same, we just take the x-axis here)
    parity_ = num_boxes_axis_[0] % 2;

    total_num_boxes_ =
        num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

    // Allocate memory for the concentration and gradient arrays
    c1_.resize(total_num_boxes_);
    c2_.resize(total_num_boxes_);
    gradients_.resize(3 * total_num_boxes_);

    initialized_ = true;
  }

  /// Applies every registered initializer function to every box, then
  /// drops the initializer list (one-shot operation).
  void RunInitializers() {
    assert(num_boxes_axis_[0] > 0 &&
           "The number of boxes along an axis was found to be zero!");
    if (initializers_.empty()) {
      return;
    }

    auto nx = num_boxes_axis_[0];
    auto ny = num_boxes_axis_[1];
    auto nz = num_boxes_axis_[2];

    // Apply all functions that initialize this diffusion grid
    for (size_t f = 0; f < initializers_.size(); f++) {
      for (size_t x = 0; x < nx; x++) {
        double real_x = grid_dimensions_[0] + x * box_length_;
        for (size_t y = 0; y < ny; y++) {
          double real_y = grid_dimensions_[2] + y * box_length_;
          for (size_t z = 0; z < nz; z++) {
            double real_z = grid_dimensions_[4] + z * box_length_;
            std::array<uint32_t, 3> box_coord;
            box_coord[0] = x;
            box_coord[1] = y;
            box_coord[2] = z;
            size_t idx = GetBoxIndex(box_coord);
            IncreaseConcentrationBy(idx,
                                    initializers_[f](real_x, real_y, real_z));
          }
        }
      }
    }

    // Clear the initializer to free up space
    initializers_.clear();
    initializers_.shrink_to_fit();
  }

  /// @brief      Updates the grid dimensions, based on the given threshold
  ///             values. The diffusion grid dimensions need always be larger
  ///             than the neighbor grid dimensions, so that each simulation
  ///             object can obtain its local concentration / gradient
  ///
  /// @param[in]  threshold_dimensions  The threshold values
  ///
  void Update(const std::array<int32_t, 2>& threshold_dimensions) {
    // Update the grid dimensions such that each dimension ranges from
    // {treshold_dimensions[0] - treshold_dimensions[1]}
    auto min_gd = threshold_dimensions[0];
    auto max_gd = threshold_dimensions[1];
    grid_dimensions_ = {min_gd, max_gd, min_gd, max_gd, min_gd, max_gd};

    // If the grid is not perfectly divisible along each dimension by the
    // box length, extend the grid so that it is
    int dimension_length = max_gd - min_gd;
    for (int i = 0; i < 3; i++) {
      // NOTE(review): fmod returns double but r is int, so the remainder is
      // truncated before the `r > 1e-9` test — fractional remainders < 1 are
      // silently dropped.  Presumably box_length_ divides evenly in practice;
      // confirm, otherwise r should be a double.
      int r = fmod(dimension_length, box_length_);
      if (r > 1e-9) {
        // std::abs for the case that box_length_ > dimension_length
        grid_dimensions_[2 * i + 1] += (box_length_ - r);
      }
    }

    // Calculate by how many boxes each dimension has grown
    int new_dimension_length = grid_dimensions_[1] - grid_dimensions_[0];
    int new_num_boxes = std::ceil(new_dimension_length / box_length_);
    int growth = new_num_boxes - num_boxes_axis_[0];

    if (growth > 0) {
      // Store the old number of boxes along each axis for comparison
      std::array<size_t, 3> tmp_num_boxes_axis = num_boxes_axis_;

      // Increase number of boxes along axis accordingly
      num_boxes_axis_[0] += growth;
      num_boxes_axis_[1] += growth;
      num_boxes_axis_[2] += growth;

      // We need to maintain the parity of the number of boxes along each
      // dimension, otherwise copying of the substances to the increases grid
      // will not be symmetrically done; resulting in shifting of boxes
      // We add a box in the negative direction, because the only way the parity
      // could have changed is because of adding a box in the positive direction
      // (due to the grid not being perfectly divisible; see above)
      if (num_boxes_axis_[0] % 2 != parity_) {
        for (int i = 0; i < 3; i++) {
          grid_dimensions_[2 * i] -= box_length_;
          num_boxes_axis_[i]++;
        }
      }

      // Temporarily save previous grid data
      std::vector<double> tmp_c1 = c1_;
      std::vector<double> tmp_gradients = gradients_;

      c1_.clear();
      c2_.clear();
      gradients_.clear();

      total_num_boxes_ =
          num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

      CopyOldData(tmp_c1, tmp_gradients, tmp_num_boxes_axis);

      assert(total_num_boxes_ >= tmp_num_boxes_axis[0] *
                                     tmp_num_boxes_axis[1] *
                                     tmp_num_boxes_axis[2] &&
             "The diffusion grid tried to shrink! It can only become larger");
    }
  }

  /// Copies the concentration and gradients values to the new
  /// (larger) grid. In the 2D case it looks like the following:
  ///
  ///                  [0 0  0  0]
  ///  [v1 v2]  -->    [0 v1 v2 0]
  ///  [v3 v4]  -->    [0 v3 v4 0]
  ///                  [0 0  0  0]
  ///
  /// The dimensions are doubled in this case from 2x2 to 4x4
  /// If the dimensions would be increased from 2x2 to 3x3, it will still
  /// be increased to 4x4 in order for GetBoxIndex to function correctly
  ///
  void CopyOldData(const std::vector<double>& old_c1,
                   const std::vector<double>& old_gradients,
                   const std::array<size_t, 3>& old_num_boxes_axis) {
    // Allocate more memory for the grid data arrays
    c1_.resize(total_num_boxes_);
    c2_.resize(total_num_boxes_);
    gradients_.resize(3 * total_num_boxes_);

    auto incr_dim_x = num_boxes_axis_[0] - old_num_boxes_axis[0];
    auto incr_dim_y = num_boxes_axis_[1] - old_num_boxes_axis[1];
    auto incr_dim_z = num_boxes_axis_[2] - old_num_boxes_axis[2];

    // Offsets center the old data inside the new, larger grid.
    int off_x = incr_dim_x / 2;
    int off_y = incr_dim_y / 2;
    int off_z = incr_dim_z / 2;

    int num_box_xy = num_boxes_axis_[0] * num_boxes_axis_[1];
    int old_box_xy = old_num_boxes_axis[0] * old_num_boxes_axis[1];
    int new_origin = off_z * (num_boxes_axis_[0] * num_boxes_axis_[1]) +
                     off_y * num_boxes_axis_[0] + off_x;
    for (size_t k = 0; k < old_num_boxes_axis[2]; k++) {
      int offset = new_origin + k * num_box_xy;
      for (size_t j = 0; j < old_num_boxes_axis[1]; j++) {
        if (j != 0) {
          offset += num_boxes_axis_[0];
        }
        for (size_t i = 0; i < old_num_boxes_axis[0]; i++) {
          auto idx = k * old_box_xy + j * old_num_boxes_axis[0] + i;
          c1_[offset + i] = old_c1[idx];
          gradients_[3 * (offset + i)] = old_gradients[3 * idx];
          gradients_[3 * (offset + i) + 1] = old_gradients[3 * idx + 1];
          gradients_[3 * (offset + i) + 2] = old_gradients[3 * idx + 2];
        }
      }
    }
  }

  /// Solves a 5-point stencil diffusion equation, with leaking-edge
  /// boundary conditions. Substances are allowed to leave the simulation
  /// space. This prevents building up concentration at the edges
  ///
  /// NOTE(review): despite the comment inherited from upstream, the loop
  /// below touches center, +-x, +-y, +-z neighbors — a 7-point (3D) stencil.
  void DiffuseWithLeakingEdge() {
    int nx = num_boxes_axis_[0];
    int ny = num_boxes_axis_[1];
    int nz = num_boxes_axis_[2];

// Y-blocking factor for cache tiling.
// NOTE(review): YBF is #define'd identically in several methods without an
// #undef; harmless only because the value never changes.
#define YBF 16
#pragma omp parallel for collapse(2)
    for (int yy = 0; yy < ny; yy += YBF) {
      for (int z = 0; z < nz; z++) {
        // To let the edges bleed we set some diffusion coefficients
        // to zero. This prevents substance building up at the edges
        auto dc_2_ = dc_;
        int ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (int y = yy; y < ymax; y++) {
          dc_2_ = dc_;
          int x;
          int c, n, s, b, t;
          x = 0;
          c = x + y * nx + z * nx * ny;
          // Clamp the neighbor indices at the domain faces and zero the
          // corresponding stencil weight so substance leaks out.
          if (y == 0) {
            n = c;
            dc_2_[4] = 0;
          } else {
            n = c - nx;
          }
          if (y == (ny - 1)) {
            s = c;
            dc_2_[3] = 0;
          } else {
            s = c + nx;
          }
          if (z == 0) {
            b = c;
            dc_2_[5] = 0;
          } else {
            b = c - nx * ny;
          }
          if (z == (nz - 1)) {
            t = c;
            dc_2_[6] = 0;
          } else {
            t = c + nx * ny;
          }
          // x = 0; we leak out substances past this edge (so multiply by 0)
          c2_[c] = (dc_2_[0] * c1_[c] + 0 * c1_[c] + dc_2_[2] * c1_[c + 1] +
                    dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] +
                    dc_2_[6] * c1_[t]) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            c2_[c] =
                (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] +
                 dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] +
                 dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] + dc_2_[6] * c1_[t]) *
                (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          // x = nx-1; we leak out substances past this edge (so multiply by 0)
          c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + 0 * c1_[c] +
                    dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] +
                    dc_2_[6] * c1_[t]) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    // Double buffering: the freshly computed grid becomes current.
    c1_.swap(c2_);
  }

  /// Solves a 5-point stencil diffusion equation, with closed-edge
  /// boundary conditions. Substances are not allowed to leave the simulation
  /// space. Keep in mind that the concentration can build up at the edges
  ///
  void DiffuseWithClosedEdge() {
    auto nx = num_boxes_axis_[0];
    auto ny = num_boxes_axis_[1];
    auto nz = num_boxes_axis_[2];

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x;
          int c, n, s, b, t;
          x = 0;
          c = x + y * nx + z * nx * ny;
          // Closed edges: out-of-domain neighbors are clamped to the center
          // box, so no mass crosses the boundary.
          n = (y == 0) ? c : c - nx;
          s = (y == ny - 1) ? c : c + nx;
          b = (z == 0) ? c : c - nx * ny;
          t = (z == nz - 1) ? c : c + nx * ny;

          c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c] + dc_[2] * c1_[c + 1] +
                    dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] +
                    dc_[6] * c1_[t]) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] +
                      dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] +
                      dc_[5] * c1_[b] + dc_[6] * c1_[t]) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c] +
                    dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] +
                    dc_[6] * c1_[t]) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Explicit (forward) Euler step of the diffusion equation on the interior
  /// boxes; boundary boxes are skipped (their c2_ values are left untouched).
  void DiffuseEuler() {
    const auto nx = num_boxes_axis_[0];
    const auto ny = num_boxes_axis_[1];
    const auto nz = num_boxes_axis_[2];

    const double ibl2 = 1 / (box_length_ * box_length_);
    const double d = 1 - dc_[0];
    // TODO(ahmad): this probably needs to scale with Param::simulation_timestep
    const double dt = 1;

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x = 0;
          // NOTE(review): n, s, b, t are incremented below before their first
          // assignment — reading/incrementing an uninitialized int is UB in
          // C++, even though the values are overwritten inside the loop
          // before use.  They should be initialized here (e.g. to c).
          int c, n, s, b, t;
          c = x + y * nx + z * nx * ny;
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            // Skip boundary slices: closed-edge Euler only updates interior.
            if (y == 0 || y == (ny - 1) || z == 0 || z == (nz - 1)) {
              continue;
            }
            n = c - nx;
            s = c + nx;
            b = c - nx * ny;
            t = c + nx * ny;
            c2_[c] = (c1_[c] +
                      d * dt * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                      d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                      d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Explicit Euler step with leaking edges: out-of-domain contributions are
  /// zeroed via the `l` mask so substance can exit the simulation space.
  void DiffuseEulerLeakingEdge() {
    const auto nx = num_boxes_axis_[0];
    const auto ny = num_boxes_axis_[1];
    const auto nz = num_boxes_axis_[2];

    const double ibl2 = 1 / (box_length_ * box_length_);
    const double d = 1 - dc_[0];
    std::array<int, 4> l;

    // TODO(ahmad): this probably needs to scale with Param::simulation_timestep
    const double dt = 1;

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x = 0;
          int c, n, s, b, t;
          c = x + y * nx + z * nx * ny;

          // Clamp neighbor indices at the faces (same scheme as the
          // closed-edge kernels).
          n = (y == 0) ? c : c - nx;
          s = (y == ny - 1) ? c : c + nx;
          b = (z == 0) ? c : c - nx * ny;
          t = (z == nz - 1) ? c : c + nx * ny;

          // x = 0 column: the -x neighbor is outside, contributes 0.
          c2_[c] = (c1_[c] + d * dt * (0 - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                    d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                    d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            // l masks the y/z neighbor terms to 0 on boundary slices.
            // NOTE(review): `l` is shared across OpenMP threads (declared
            // outside the parallel for) — presumably benign because every
            // thread writes the same values per slice, but it should be
            // loop-local; confirm before relying on this with omp simd.
            l.fill(1);
            if (y == 0) {
              l[0] = 0;
            }
            if (y == ny - 1) {
              l[1] = 0;
            }
            if (z == 0) {
              l[2] = 0;
            }
            if (z == nz - 1) {
              l[3] = 0;
            }
            c2_[c] = (c1_[c] +
                      d * dt * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                      d * dt * (l[0] * c1_[s] - 2 * c1_[c] + l[1] * c1_[n]) *
                          ibl2 +
                      d * dt * (l[2] * c1_[b] - 2 * c1_[c] + l[3] * c1_[t]) *
                          ibl2) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          // x = nx-1 column: the +x neighbor is outside, contributes 0.
          c2_[c] = (c1_[c] + d * dt * (c1_[c - 1] - 2 * c1_[c] + 0) * ibl2 +
                    d * dt * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                    d * dt * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Calculates the gradient for each box in the diffusion grid.
  /// The gradient is calculated in each direction (x, y, z) as following:
  ///
  /// c(x + box_length_) - c(x - box_length) / (2 * box_length_),
  ///
  /// where c(x) implies the concentration at position x
  ///
  /// At the edges the gradient is the same as the box next to it
  void CalculateGradient() {
    double gd = 1 / (box_length_ * 2);

    auto nx = num_boxes_axis_[0];
    auto ny = num_boxes_axis_[1];
    auto nz = num_boxes_axis_[2];

#pragma omp parallel for collapse(2)
    for (size_t z = 0; z < nz; z++) {
      for (size_t y = 0; y < ny; y++) {
        for (size_t x = 0; x < nx; x++) {
          int c, e, w, n, s, b, t;
          c = x + y * nx + z * nx * ny;

          // Central differences in the interior; at a face, shift the pair
          // one box inward so the edge reuses its neighbor's gradient.
          if (x == 0) {
            e = c;
            w = c + 2;
          } else if (x == nx - 1) {
            e = c - 2;
            w = c;
          } else {
            e = c - 1;
            w = c + 1;
          }

          if (y == 0) {
            n = c + 2 * nx;
            s = c;
          } else if (y == ny - 1) {
            n = c;
            s = c - 2 * nx;
          } else {
            n = c + nx;
            s = c - nx;
          }

          if (z == 0) {
            t = c + 2 * nx * ny;
            b = c;
          } else if (z == nz - 1) {
            t = c;
            b = c - 2 * nx * ny;
          } else {
            t = c + nx * ny;
            b = c - nx * ny;
          }

          // Let the gradient point from low to high concentration
          gradients_[3 * c + 0] = (c1_[w] - c1_[e]) * gd;
          gradients_[3 * c + 1] = (c1_[n] - c1_[s]) * gd;
          gradients_[3 * c + 2] = (c1_[t] - c1_[b]) * gd;
        }
      }
    }
  }

  /// Increase the concentration at specified position with specified amount
  void IncreaseConcentrationBy(const std::array<double, 3>& position,
                               double amount) {
    auto idx = GetBoxIndex(position);
    IncreaseConcentrationBy(idx, amount);
  }

  /// Increase the concentration at specified box with specified amount
  /// (clamped to concentration_threshold_).
  void IncreaseConcentrationBy(size_t idx, double amount) {
    assert(idx < total_num_boxes_ &&
           "Cell position is out of diffusion grid bounds");
    c1_[idx] += amount;
    if (c1_[idx] > concentration_threshold_) {
      c1_[idx] = concentration_threshold_;
    }
  }

  /// Get the concentration at specified position
  double GetConcentration(const std::array<double, 3>& position) {
    return c1_[GetBoxIndex(position)];
  }

  /// Get the (normalized) gradient at specified position
  /// (left unnormalized when its magnitude is below 1e-10 to avoid
  /// dividing by ~zero).
  void GetGradient(const std::array<double, 3>& position,
                   std::array<double, 3>* gradient) {
    auto idx = GetBoxIndex(position);
    assert(idx < total_num_boxes_ &&
           "Cell position is out of diffusion grid bounds");
    (*gradient)[0] = gradients_[3 * idx];
    (*gradient)[1] = gradients_[3 * idx + 1];
    (*gradient)[2] = gradients_[3 * idx + 2];
    auto norm = std::sqrt((*gradient)[0] * (*gradient)[0] +
                          (*gradient)[1] * (*gradient)[1] +
                          (*gradient)[2] * (*gradient)[2]);
    if (norm > 1e-10) {
      (*gradient)[0] /= norm;
      (*gradient)[1] /= norm;
      (*gradient)[2] /= norm;
    }
  }

  /// Maps a world position to integer box coordinates.
  /// NOTE(review): uses unsigned box_coord; positions left of the grid
  /// origin would wrap around — presumably callers guarantee the position
  /// lies inside grid_dimensions_.
  std::array<uint32_t, 3> GetBoxCoordinates(
      const std::array<double, 3>& position) const {
    std::array<uint32_t, 3> box_coord;
    box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_;
    box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_;
    box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_;
    return box_coord;
  }

  /// Linearizes box coordinates (x fastest, then y, then z).
  size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const {
    size_t ret = box_coord[2] * num_boxes_axis_[0] * num_boxes_axis_[1] +
                 box_coord[1] * num_boxes_axis_[0] + box_coord[0];
    return ret;
  }

  /// Calculates the box index of the substance at specified position
  size_t GetBoxIndex(const std::array<double, 3>& position) const {
    auto box_coord = GetBoxCoordinates(position);
    return GetBoxIndex(box_coord);
  }

  // --- Simple accessors / mutators ---------------------------------------
  void SetDecayConstant(double mu) { mu_ = mu; }

  void SetConcentrationThreshold(double t) { concentration_threshold_ = t; }

  double GetConcentrationThreshold() { return concentration_threshold_; }

  double* GetAllConcentrations() { return c1_.data(); }

  double* GetAllGradients() { return gradients_.data(); }

  const std::array<size_t, 3>& GetNumBoxesArray() { return num_boxes_axis_; }

  size_t GetNumBoxes() { return total_num_boxes_; }

  double GetBoxLength() { return box_length_; }

  int GetSubstanceId() { return substance_; }

  std::string GetSubstanceName() { return substance_name_; }

  double GetDecayConstant() { return mu_; }

  int32_t* GetDimensionsPtr() { return grid_dimensions_.data(); }

  std::array<int32_t, 6>& GetDimensions() { return grid_dimensions_; }

  std::array<double, 7>& GetDiffusionCoefficients() { return dc_; }

  bool IsInitialized() { return initialized_; }

  int GetResolution() { return resolution_; }

  double GetBoxVolume() { return box_volume_; }

  /// Registers a concentration initializer f(x, y, z) to be applied by
  /// RunInitializers().
  template <typename F>
  void AddInitializer(F function) {
    initializers_.push_back(function);
  }

 private:
  /// The id of the substance of this grid
  int substance_ = 0;
  /// The name of the substance of this grid
  std::string substance_name_ = "";
  /// The side length of each box
  double box_length_ = 0;
  /// the volume of each box
  double box_volume_ = 0;
  /// The array of concentration values
  std::vector<double> c1_ = {};
  /// An extra concentration data buffer for faster value updating
  std::vector<double> c2_ = {};
  /// The array of gradients (x, y, z)
  std::vector<double> gradients_ = {};
  /// The maximum concentration value that a box can have
  double concentration_threshold_ = 1e15;
  /// The diffusion coefficients [cc, cw, ce, cs, cn, cb, ct]
  std::array<double, 7> dc_ = {{0}};
  /// The decay constant
  double mu_ = 0;
  /// The grid dimensions of the diffusion grid
  std::array<int32_t, 6> grid_dimensions_ = {{0}};
  /// The number of boxes at each axis [x, y, z]
  std::array<size_t, 3> num_boxes_axis_ = {{0}};
  /// The total number of boxes in the diffusion grid
  size_t total_num_boxes_ = 0;
  /// Flag to determine if this grid has been initialized
  bool initialized_ = false;
  /// The resolution of the diffusion grid
  int resolution_ = 0;
  /// If false, grid dimensions are even; if true, they are odd
  bool parity_ = false;
  /// A list of functions that initialize this diffusion grid
  std::vector<std::function<double(double, double, double)>> initializers_ =
      {};

  ClassDefNV(DiffusionGrid, 1);
};

}  // namespace bdm

#endif  // DIFFUSION_GRID_H_
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 5x5 convolution, stride 1, no padding handled here.
// Accumulates bottom_blob (inch channels) into top_blob (outch channels).
// Kernel layout: _kernel is outch * inch * 25 floats, one 5x5 filter per
// (output channel, input channel) pair. Rows of an image are assumed
// contiguous with stride `w` floats (consistent with r1 = img0 + w below).
// NOTE(review): the input is expected to be pre-padded so that outw+4 <= w
// and outh+4 <= h — TODO confirm against the caller.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;   // NOTE(review): unused in this function
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One output channel per OpenMP task; each task owns its output plane.
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Initialize the whole output plane with the bias (0 if no bias).
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // Accumulate the contribution of every input channel.
        for (int q=0; q<inch; q++)
        {
            // Two output rows are produced per iteration of the main loop,
            // hence two output row pointers.
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*25 + q*25;

            // Six input row pointers: rows 0..4 feed output row i,
            // rows 1..5 feed output row i+1.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;

            // The 5 kernel rows.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            // The 25 kernel taps held in 7 vector registers; names give the
            // tap indices each register holds (_k24242424 = tap 24 splatted).
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            int i = 0;

            // Main loop: two output rows at a time.
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // vectorized groups of 4 outputs
                int remain = outw - (nn << 2);   // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    // Load the running sums (output already holds bias +
                    // previous channels' contributions).
                    float32x4_t _sum = vld1q_f32(outptr);
                    float32x4_t _sum2 = vld1q_f32(outptr2);

                    // For each input row, build the 5 shifted views
                    // rX0..rX4 of 8 consecutive pixels via vext.
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);

                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);

                    // Output row i: rows 0..4 against kernel taps 0..24.
                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    // Output row i+1: same taps, rows shifted down by one
                    // (rows 1..5).
                    _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k0123, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r11, _k0123, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k0123, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r13, _k0123, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r14, _k4567, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r20, _k4567, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k4567, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r22, _k4567, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r23, _k891011, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r24, _k891011, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r30, _k891011, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r31, _k891011, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r32, _k12131415, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r33, _k12131415, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r34, _k12131415, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r40, _k12131415, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r41, _k16171819, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r42, _k16171819, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r43, _k16171819, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r44, _k16171819, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r50, _k20212223, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r51, _k20212223, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r52, _k20212223, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r53, _k20212223, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r54, _k24242424, 0);

                    vst1q_f32(outptr, _sum);
                    vst1q_f32(outptr2, _sum2);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    outptr += 4;
                    outptr2 += 4;
                }
#else
                // ARMv7 inline assembly version of the same two-row kernel.
                // q7/q8 hold the two output accumulators; q13/q14 are
                // secondary accumulators merged at the end to shorten the
                // dependency chains. %e/%f select the low/high half of the
                // kernel vector operands %18..%24.
                if (nn > 0)
                {
                asm volatile(
//                    "veor       q13, q13            \n"
//                    "veor       q14, q14            \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d14-d15}, [%1]     \n"// q7 = out

                    "0:                             \n"
                    // q11 = rx1 / rx3
                    // q12 = rx2
                    // q13 q14 = intermediate sum register
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d16-d17}, [%2]     \n"// q8 = out2

                    "pld        [%4, #256]          \n"
                    // r1
                    "vld1.f32   {d18-d21}, [%4]     \n"// q9 q10 = r10 r14
                    "add        %4, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"// r11

                    "vmul.f32   q13, q9, %e19[1]    \n"
                    "vmla.f32   q8, q9, %e18[0]     \n"

                    "vext.32    q12, q9, q10, #2    \n"// r12

                    "vmla.f32   q7, q11, %f19[0]    \n"
                    "vmul.f32   q14, q11, %e18[1]   \n"

                    "vext.32    q11, q9, q10, #3    \n"// r13

                    "vmla.f32   q13, q12, %f19[1]   \n"
                    "vmla.f32   q8, q12, %f18[0]    \n"

                    "vmla.f32   q7, q11, %e20[0]    \n"
                    "vmla.f32   q14, q11, %f18[1]   \n"

                    "pld        [%5, #256]          \n"

                    "vmla.f32   q13, q10, %e20[1]   \n"
                    "vmla.f32   q8, q10, %e19[0]    \n"

                    // r2
                    "vld1.f32   {d18-d21}, [%5]     \n"// q9 q10 = r20 r24
                    "add        %5, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"// r21

                    "vmla.f32   q7, q9, %f20[0]     \n"
                    "vmla.f32   q14, q9, %e19[1]    \n"

                    "vext.32    q12, q9, q10, #2    \n"// r22

                    "vmla.f32   q13, q11, %f20[1]   \n"
                    "vmla.f32   q8, q11, %f19[0]    \n"

                    "vext.32    q11, q9, q10, #3    \n"// r23

                    "vmla.f32   q7, q12, %e21[0]    \n"
                    "vmla.f32   q14, q12, %f19[1]   \n"

                    "vmla.f32   q13, q11, %e21[1]   \n"
                    "vmla.f32   q8, q11, %e20[0]    \n"

                    "pld        [%6, #256]          \n"

                    "vmla.f32   q7, q10, %f21[0]    \n"
                    "vmla.f32   q14, q10, %e20[1]   \n"

                    // r3
                    "vld1.f32   {d18-d21}, [%6]     \n"// q9 q10 = r30 r34
                    "add        %6, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"// r31

                    "vmla.f32   q13, q9, %f21[1]    \n"
                    "vmla.f32   q8, q9, %f20[0]     \n"

                    "vext.32    q12, q9, q10, #2    \n"// r32

                    "vmla.f32   q7, q11, %e22[0]    \n"
                    "vmla.f32   q14, q11, %f20[1]   \n"

                    "vext.32    q11, q9, q10, #3    \n"// r33

                    "vmla.f32   q13, q12, %e22[1]   \n"
                    "vmla.f32   q8, q12, %e21[0]    \n"

                    "vmla.f32   q7, q11, %f22[0]    \n"
                    "vmla.f32   q14, q11, %e21[1]   \n"

                    "pld        [%7, #256]          \n"

                    "vmla.f32   q13, q10, %f22[1]   \n"
                    "vmla.f32   q8, q10, %f21[0]    \n"

                    // r4
                    "vld1.f32   {d18-d21}, [%7]     \n"// q9 q10 = r40 r44
                    "add        %7, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"// r41

                    "vmla.f32   q7, q9, %e23[0]     \n"
                    "vmla.f32   q14, q9, %f21[1]    \n"

                    "vext.32    q12, q9, q10, #2    \n"// r42

                    "vmla.f32   q13, q11, %e23[1]   \n"
                    "vmla.f32   q8, q11, %e22[0]    \n"

                    "vext.32    q11, q9, q10, #3    \n"// r43

                    "vmla.f32   q7, q12, %f23[0]    \n"
                    "vmla.f32   q14, q12, %e22[1]   \n"

                    "vmla.f32   q13, q11, %f23[1]   \n"
                    "vmla.f32   q8, q11, %f22[0]    \n"

                    "pld        [%3, #256]          \n"

                    "vmla.f32   q7, q10, %e24[0]    \n"
                    "vmla.f32   q14, q10, %f22[1]   \n"

                    // r0 and r5
                    "vld1.f32   {d18-d21}, [%3]     \n"// q9 q10 = r00 r04
                    "add        %3, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"// r01

                    "vmla.f32   q13, q11, %e18[1]   \n"

                    "vext.32    q12, q9, q10, #2    \n"// r02

                    "vmla.f32   q7, q12, %f18[0]    \n"

                    "vext.32    q11, q9, q10, #3    \n"// r03

                    "pld        [%8, #256]          \n"

                    "vmla.f32   q13, q11, %f18[1]   \n"

                    // r5
                    "vld1.f32   {d22-d25}, [%8]     \n"// q11 q12 = r50 r54
                    "add        %8, #16             \n"

                    "vmla.f32   q8, q11, %e23[0]    \n"
                    "vmla.f32   q14, q12, %e24[0]   \n"

                    "vmla.f32   q7, q9, %e18[0]     \n"
                    "vmla.f32   q13, q10, %e19[0]   \n"

                    "vext.32    q9, q11, q12, #1    \n"// r51
                    "vext.32    q10, q11, q12, #2   \n"// r52

                    "vmla.f32   q14, q9, %e23[1]    \n"

                    "vext.32    q9, q11, q12, #3    \n"// r53

                    "vmla.f32   q8, q10, %f23[0]    \n"
                    "vmla.f32   q14, q9, %f23[1]    \n"

                    "vadd.f32   q7, q7, q13         \n"

//                    "veor       q13, q13            \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "vadd.f32   q8, q8, q14         \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d14-d15}, [%1]     \n"// q7 = out

//                    "veor       q14, q14            \n"

                    "vst1.f32   {d16-d17}, [%2]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    : "=r"(nn),      // %0
                      "=r"(outptr),  // %1
                      "=r"(outptr2), // %2
                      "=r"(r0),      // %3
                      "=r"(r1),      // %4
                      "=r"(r2),      // %5
                      "=r"(r3),      // %6
                      "=r"(r4),      // %7
                      "=r"(r5)       // %8
                    : "0"(nn),
                      "1"(outptr),
                      "2"(outptr2),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "6"(r3),
                      "7"(r4),
                      "8"(r5),
                      "w"(_k0123),     // %18
                      "w"(_k4567),     // %19
                      "w"(_k891011),   // %20
                      "w"(_k12131415), // %21
                      "w"(_k16171819), // %22
                      "w"(_k20212223), // %23
                      "w"(_k24242424)  // %24
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // Scalar tail for the two-row case (also the whole row when
                // NEON is unavailable).
                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;
#if __ARM_NEON
                    // Vectorize across the first 4 columns of each kernel
                    // row; the 5th column is handled via the _k_t4/_r_t4
                    // gather below plus a scalar term.
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _sum = vmulq_f32(_r1, _k1);
                    float32x4_t _sum2 = vmulq_f32(_r1, _k0123);

                    float32x4_t _r2 = vld1q_f32(r2);
                    float32x4_t _k2 = vld1q_f32(k2);
                    _sum = vmlaq_f32(_sum, _r2, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r2, _k1);

                    float32x4_t _r3 = vld1q_f32(r3);
                    float32x4_t _k3 = vld1q_f32(k3);
                    _sum = vmlaq_f32(_sum, _r3, _k3);
                    _sum2 = vmlaq_f32(_sum2, _r3, _k2);

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    _sum2 = vmlaq_f32(_sum2, _r4, _k3);

                    float32x4_t _r0 = vld1q_f32(r0);
                    _sum = vmlaq_f32(_sum, _r0, _k0123);

                    float32x4_t _r5 = vld1q_f32(r5);
                    _sum2 = vmlaq_f32(_sum2, _r5, _k20212223);

                    // Column 4 of kernel rows 0..3 gathered into one vector.
                    // NOTE(review): _k_t4/_r_t4 are fully written by the four
                    // vsetq_lane calls, but start formally uninitialized.
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4];

                    // Shift the column-4 gather down one row for output row
                    // i+1 (now r1..r4 against kernel rows 0..3).
                    _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                    _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
                    sum2 = r5[4] * k4[4];

                    // Horizontal reduction of both accumulators at once.
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);

                    sum += vget_lane_f32(_ss_ss2, 0);
                    sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];

                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];

                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];

                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];

                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // Advance past the 4-pixel kernel apron and skip one extra
                // row (two output rows consumed per iteration).
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Leftover single output row (odd outh).
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    vst1q_f32(outptr, _sum);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    outptr += 4;
                }
#else
                // ARMv7 assembly: single output row, four partial sums
                // (q7, q13, q14, q15) merged at the end.
                if (nn > 0)
                {
                asm volatile(
//                    "veor       q15, q15            \n"// _sum3 = 0;

                    "pld        [%1, #128]          \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d16-d19}, [%2]     \n"// _r00 = vld1q_f32(r0+j);
                    "add        %2, #16             \n"

                    "0:                             \n"

                    "vld1.f32   {d14-d15}, [%1]     \n"// _sum = vld1q_f32(outptr+j);

                    "veor       q13, q13            \n"// _sum2 = 0;
                    "veor       q14, q14            \n"// _sum3 = 0;

                    "vext.32    q10, q8, q9, #1     \n"// _r01
                    "vext.32    q11, q8, q9, #2     \n"// _r02
                    "vext.32    q12, q8, q9, #3     \n"// _r03

                    "vmla.f32   q7, q8, %e14[0]     \n"
                    "vmla.f32   q13, q10, %e14[1]   \n"

                    "pld        [%3, #256]          \n"

                    "vmla.f32   q14, q11, %f14[0]   \n"
                    "vmul.f32   q15, q12, %f14[1]   \n"

                    "vmla.f32   q7, q9, %e15[0]     \n"

                    "vld1.f32   {d16-d19}, [%3]     \n"
                    "add        %3, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"
                    "vext.32    q12, q8, q9, #3     \n"

                    "vmla.f32   q7, q8, %e15[1]     \n"
                    "vmla.f32   q13, q10, %f15[0]   \n"

                    "pld        [%4, #256]          \n"

                    "vmla.f32   q14, q11, %f15[1]   \n"
                    "vmla.f32   q15, q12, %e16[0]   \n"

                    "vmla.f32   q7, q9, %e16[1]     \n"

                    "vld1.f32   {d16-d19}, [%4]     \n"
                    "add        %4, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"
                    "vext.32    q12, q8, q9, #3     \n"

                    "vmla.f32   q7, q8, %f16[0]     \n"
                    "vmla.f32   q13, q10, %f16[1]   \n"

                    "pld        [%5, #256]          \n"

                    "vmla.f32   q14, q11, %e17[0]   \n"
                    "vmla.f32   q15, q12, %e17[1]   \n"

                    "vmla.f32   q7, q9, %f17[0]     \n"

                    "vld1.f32   {d16-d19}, [%5]     \n"
                    "add        %5, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"
                    "vext.32    q12, q8, q9, #3     \n"

                    "vmla.f32   q7, q8, %f17[1]     \n"
                    "vmla.f32   q13, q10, %e18[0]   \n"

                    "pld        [%6, #256]          \n"

                    "vmla.f32   q14, q11, %e18[1]   \n"
                    "vmla.f32   q15, q12, %f18[0]   \n"

                    "vmla.f32   q7, q9, %f18[1]     \n"

                    "vld1.f32   {d16-d19}, [%6]     \n"
                    "add        %6, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"
                    "vext.32    q12, q8, q9, #3     \n"

                    "vmla.f32   q7, q8, %e19[0]     \n"
                    "vmla.f32   q13, q10, %e19[1]   \n"
                    "vmla.f32   q14, q11, %f19[0]   \n"
                    "vmla.f32   q15, q12, %f19[1]   \n"

                    "vmla.f32   q7, q9, %e20[0]     \n"

                    "vadd.f32   q14, q14, q15       \n"
                    "vadd.f32   q7, q7, q13         \n"

//                    "veor       q15, q15            \n"// _sum3 = 0;

                    "pld        [%2, #256]          \n"

                    "vadd.f32   q7, q7, q14         \n"

                    "vld1.f32   {d16-d19}, [%2]     \n"// _r00 = vld1q_f32(r0+j);
                    "add        %2, #16             \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "pld        [%1, #128]          \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %2, #16             \n"

                    : "=r"(nn),      // %0
                      "=r"(outptr),  // %1
                      "=r"(r0),      // %2
                      "=r"(r1),      // %3
                      "=r"(r2),      // %4
                      "=r"(r3),      // %5
                      "=r"(r4)       // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "w"(_k0123),     // %14
                      "w"(_k4567),     // %15
                      "w"(_k891011),   // %16
                      "w"(_k12131415), // %17
                      "w"(_k16171819), // %18
                      "w"(_k20212223), // %19
                      "w"(_k24242424)  // %20
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // Scalar tail for the single-row case.
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);

                    // Column 4 of kernel rows 0..3; row 4 column 4 scalar.
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }

                // Skip the 4-pixel kernel apron at the end of the row.
                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}

// 5x5 convolution, stride 2. Same kernel/bias layout as conv5x5s1_neon.
// Produces one output row per input-row pair; tailstep skips the apron plus
// one full input row between output rows.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;   // NOTE(review): unused in this function
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After producing outw outputs the row pointers sit at 2*outw; advance
    // them past the rest of this row and the next (stride-2 in y).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*25 + q*25;

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    // vld2 deinterleaves even/odd pixels, giving the
                    // stride-2 column views directly; vext against the next
                    // 8-pixel group supplies the shifted taps.
                    float32x4x2_t _r00_02461357 = vld2q_f32(r0);
                    float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
                    float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
                    float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
                    float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
                    float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
                    float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
                    float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
                    float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10

                    float32x4x2_t _r10_02461357 = vld2q_f32(r1);
                    float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
                    float32x4_t _r1_8101214 = _r10nx2.val[0];
                    float32x4_t _r1_9111315 = _r10nx2.val[1];
                    float32x4_t _r10 = _r10_02461357.val[0];
                    float32x4_t _r11 = _r10_02461357.val[1];
                    float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
                    float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
                    float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);

                    float32x4x2_t _r20_02461357 = vld2q_f32(r2);
                    float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
                    float32x4_t _r2_8101214 = _r20nx2.val[0];
                    float32x4_t _r2_9111315 = _r20nx2.val[1];
                    float32x4_t _r20 = _r20_02461357.val[0];
                    float32x4_t _r21 = _r20_02461357.val[1];
                    float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
                    float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
                    float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);

                    float32x4x2_t _r30_02461357 = vld2q_f32(r3);
                    float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
                    float32x4_t _r3_8101214 = _r30nx2.val[0];
                    float32x4_t _r3_9111315 = _r30nx2.val[1];
                    float32x4_t _r30 = _r30_02461357.val[0];
                    float32x4_t _r31 = _r30_02461357.val[1];
                    float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
                    float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
                    float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);

                    float32x4x2_t _r40_02461357 = vld2q_f32(r4);
                    float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
                    float32x4_t _r4_8101214 = _r40nx2.val[0];
                    float32x4_t _r4_9111315 = _r40nx2.val[1];
                    float32x4_t _r40 = _r40_02461357.val[0];
                    float32x4_t _r41 = _r40_02461357.val[1];
                    float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
                    float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
                    float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    vst1q_f32(outptr, _sum);

                    // 4 outputs consume 8 input pixels at stride 2.
                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    r3 += 8;
                    r4 += 8;
                    outptr += 4;
                }
#else
                // ARMv7 assembly for the stride-2 case: vld2 deinterleave
                // plus vext, four partial accumulators merged at the end.
                if (nn > 0)
                {
                asm volatile(
                    "veor       q15, q15            \n"// _sump3 = 0;

                    "pld        [%1, #128]          \n"

                    "veor       q13, q13            \n"// _sump2 = 0;

                    "pld        [%2, #256]          \n"

                    "veor       q14, q14            \n"// _sump3 = 0;

                    "vld2.f32   {d16-d19}, [%2]!    \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
                    "pld        [%2, #256]          \n"
                    "vld2.f32   {d20-d23}, [%2]     \n"// q10 = 8 10 12 14 q11 = 9 11 13 15

                    "0:                             \n"

                    "vld1.f32   {d14-d15}, [%1]     \n"// q7 = outptr

                    "vext.32    q12, q8, q10, #1    \n"// q12 = 2 4 6 8
                    "vext.32    q11, q9, q11, #1    \n"// q11 = 3 5 7 9
                    "vext.32    q10, q8, q10, #2    \n"// q10 = 4 6 8 10

                    "vmla.f32   q7, q8, %e14[0]     \n"
                    "vmla.f32   q13, q9, %e14[1]    \n"

                    "pld        [%3, #256]          \n"

                    "vmla.f32   q14, q12, %f14[0]   \n"
                    "vmla.f32   q15, q11, %f14[1]   \n"

                    "vmla.f32   q7, q10, %e15[0]    \n"

                    "vld2.f32   {d16-d19}, [%3]!    \n"
                    "pld        [%3, #256]          \n"
                    "vld2.f32   {d20-d23}, [%3]     \n"

                    "vext.32    q12, q8, q10, #1    \n"
                    "vext.32    q11, q9, q11, #1    \n"
                    "vext.32    q10, q8, q10, #2    \n"

                    "vmla.f32   q7, q8, %e15[1]     \n"
                    "vmla.f32   q13, q9, %f15[0]    \n"

                    "pld        [%4, #256]          \n"

                    "vmla.f32   q14, q12, %f15[1]   \n"
                    "vmla.f32   q15, q11, %e16[0]   \n"

                    "vmla.f32   q7, q10, %e16[1]    \n"

                    "vld2.f32   {d16-d19}, [%4]!    \n"
                    "pld        [%4, #256]          \n"
                    "vld2.f32   {d20-d23}, [%4]     \n"

                    "vext.32    q12, q8, q10, #1    \n"
                    "vext.32    q11, q9, q11, #1    \n"
                    "vext.32    q10, q8, q10, #2    \n"

                    "vmla.f32   q7, q8, %f16[0]     \n"
                    "vmla.f32   q13, q9, %f16[1]    \n"

                    "pld        [%5, #256]          \n"

                    "vmla.f32   q14, q12, %e17[0]   \n"
                    "vmla.f32   q15, q11, %e17[1]   \n"

                    "vmla.f32   q7, q10, %f17[0]    \n"

                    "vld2.f32   {d16-d19}, [%5]!    \n"
                    "pld        [%5, #256]          \n"
                    "vld2.f32   {d20-d23}, [%5]     \n"

                    "vext.32    q12, q8, q10, #1    \n"
                    "vext.32    q11, q9, q11, #1    \n"
                    "vext.32    q10, q8, q10, #2    \n"

                    "vmla.f32   q7, q8, %f17[1]     \n"
                    "vmla.f32   q13, q9, %e18[0]    \n"

                    "pld        [%6, #256]          \n"

                    "vmla.f32   q14, q12, %e18[1]   \n"
                    "vmla.f32   q15, q11, %f18[0]   \n"

                    "vmla.f32   q7, q10, %f18[1]    \n"

                    "vld2.f32   {d16-d19}, [%6]!    \n"
                    "pld        [%6, #256]          \n"
                    "vld2.f32   {d20-d23}, [%6]     \n"

                    "vext.32    q12, q8, q10, #1    \n"
                    "vext.32    q11, q9, q11, #1    \n"
                    "vext.32    q10, q8, q10, #2    \n"

                    "vmla.f32   q7, q8, %e19[0]     \n"
                    "vmla.f32   q13, q9, %e19[1]    \n"
                    "vmla.f32   q14, q12, %f19[0]   \n"
                    "vmla.f32   q15, q11, %f19[1]   \n"

                    "vmla.f32   q7, q10, %e20[0]    \n"

                    "pld        [%2, #256]          \n"

                    "vld2.f32   {d16-d19}, [%2]!    \n"// q8 = 0 2 4 6 q9 = 1 3 5 7

                    "vadd.f32   q14, q14, q15       \n"
                    "vadd.f32   q7, q7, q13         \n"

                    "veor       q15, q15            \n"// _sump3 = 0;
                    "veor       q13, q13            \n"// _sump2 = 0;

                    "pld        [%2, #256]          \n"

                    "vadd.f32   q7, q7, q14         \n"

                    "vld2.f32   {d20-d23}, [%2]     \n"// q10 = 8 10 12 14 q11 = 9 11 13 15

                    "veor       q14, q14            \n"// _sump3 = 0;

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "pld        [%1, #128]          \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %2, #32             \n"

                    : "=r"(nn),      // %0
                      "=r"(outptr),  // %1
                      "=r"(r0),      // %2
                      "=r"(r1),      // %3
                      "=r"(r2),      // %4
                      "=r"(r3),      // %5
                      "=r"(r4)       // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "w"(_k0123),     // %14
                      "w"(_k4567),     // %15
                      "w"(_k891011),   // %16
                      "w"(_k12131415), // %17
                      "w"(_k16171819), // %18
                      "w"(_k20212223), // %19
                      "w"(_k24242424)  // %20
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // Scalar tail for stride 2.
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    // First 4 columns of each kernel row vectorized; the
                    // 5th column of every row accumulated in scalar.
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);

                    sum += r0[4] * k0[4];
                    sum += r1[4] * k1[4];
                    sum += r2[4] * k2[4];
                    sum += r3[4] * k3[4];
                    sum += r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
            }
        }
    }
}
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform the 3x3 int8 kernels into the Winograd F(2,3) domain (one 4x4
// int16 tile per kernel, computed as G * g * G^T with an integer-scaled G)
// and then repack the transformed weights into the 8-/4-/1-output-channel
// interleaved layout consumed by the dot loops of
// conv3x3s1_winograd23_int8_neon.
//
// kernel     : inch*outch 3x3 signed-char weights, laid out p*inch*9 + q*9
// kernel_tm2 : receives 4 repacked Mats, one per transformed-tile row r
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    // one 4x4 transformed block per (outch, inch) kernel, stored as int16
    Mat kernel_tm(4*4, inch, outch, 2ul);

    // G (kernel-side transform matrix, scaled to stay integral; the extra
    // factor is compensated by the >>2 in the output transform)
    const short ktm[4][3] = {
        { 2, 0, 0},
        { 1, 1, 1},
        { 1, -1, 1},
        { 0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (4x3 intermediate; (short) cast forces int16 math)
            short tmp[4][3];
            for (int i=0; i<4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (final 4x4 transformed kernel tile)
            for (int j=0; j<4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<4; i++)
                {
                    kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: for each of the 4 tile rows r, interleave 4 coefficients from
    // 8 (then 4, then 1) consecutive output channels so the dot kernels can
    // stream them linearly.
    // NOTE(review): this loop indexes the transformed data via raw pointer
    // arithmetic ((const short*)kernel_tm + p*inch*16) while the winograd43
    // variant below uses kernel_tm.channel(p) — relies on cstep == inch*16
    // being exact for this Mat shape; verify against Mat's alignment rules.
    for (int r=0; r<4; r++)
    {
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;
            const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16;
            const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16;
            const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16;
            const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                // 8 channels x 4 coefficients of tile row r, interleaved
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];
                ktmp[16] = kernel4[r*4+0];
                ktmp[17] = kernel4[r*4+1];
                ktmp[18] = kernel4[r*4+2];
                ktmp[19] = kernel4[r*4+3];
                ktmp[20] = kernel5[r*4+0];
                ktmp[21] = kernel5[r*4+1];
                ktmp[22] = kernel5[r*4+2];
                ktmp[23] = kernel5[r*4+3];
                ktmp[24] = kernel6[r*4+0];
                ktmp[25] = kernel6[r*4+1];
                ktmp[26] = kernel6[r*4+2];
                ktmp[27] = kernel6[r*4+3];
                ktmp[28] = kernel7[r*4+0];
                ktmp[29] = kernel7[r*4+1];
                ktmp[30] = kernel7[r*4+2];
                ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
                kernel4 += 16;
                kernel5 += 16;
                kernel6 += 16;
                kernel7 += 16;
            }
        }
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                // 4 channels x 4 coefficients of tile row r
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
            }
        }
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm + p*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                // 1 channel x 4 coefficients of tile row r
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 16;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Winograd F(2,3) int8 3x3 stride-1 convolution.
// Pipeline: pad input to 2n+2 -> input transform (B^T d B, int8->int16) ->
// per-tile dot product against the repacked transformed kernels (int16*int16
// accumulated into int32) -> output transform (A^T m A, with >>2 to undo the
// integer scaling of G) -> crop back to the requested output size.
// kernel_tm_test must come from conv3x3s1_winograd23_transform_kernel_int8_neon.
static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in FeatherCNN
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        // 4 int16 values per (tile-row, tile, inch); tile rows are separated
        // by a stride of `tiles` channels
        bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f,  0.0f},
        //     {0.0f,  1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  0.00f, 1.0f}
        // };
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);

            for (int j=0; j<nColBlocks; j++)
            {
                // 4 input rows feeding one row of 2x2 output tiles
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i<nRowBlocks; i++)
                {
                    short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q);
                    short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q);
                    short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q);
                    short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q);
#if __ARM_NEON
#if __aarch64__
                    // B^T * d * B with widening int8->int16 subtract/add;
                    // trn1/trn2 pairs perform the 4x4 transpose in registers
                    asm volatile(
                        // load
                        "prfm   pldl1keep, [%0, #64]    \n"
                        "ld1    {v0.8b}, [%0]           \n"
                        "prfm   pldl1keep, [%1, #64]    \n"
                        "ld1    {v1.8b}, [%1]           \n"
                        "prfm   pldl1keep, [%2, #64]    \n"
                        "ld1    {v2.8b}, [%2]           \n"
                        "prfm   pldl1keep, [%3, #64]    \n"
                        "ld1    {v3.8b}, [%3]           \n"
                        // w = B_t * d, trans int8 to int16
                        "ssubl  v4.8h, v0.8b, v2.8b     \n" // d4
                        "saddl  v5.8h, v1.8b, v2.8b     \n" // d6
                        "ssubl  v6.8h, v2.8b, v1.8b     \n" // d8
                        "ssubl  v7.8h, v3.8b, v1.8b     \n" // d10
                        // transpose w to w_t
                        "trn1   v8.4h, v4.4h, v5.4h     \n"
                        "trn2   v9.4h, v4.4h, v5.4h     \n"
                        "trn1   v10.4h, v6.4h, v7.4h    \n"
                        "trn2   v11.4h, v6.4h, v7.4h    \n"
                        "trn1   v0.2s, v8.2s, v10.2s    \n"
                        "trn2   v2.2s, v8.2s, v10.2s    \n"
                        "trn1   v1.2s, v9.2s, v11.2s    \n"
                        "trn2   v3.2s, v9.2s, v11.2s    \n"
                        // U = B_t * d_t
                        "sub    v4.4h, v0.4h, v2.4h     \n"
                        "add    v5.4h, v1.4h, v2.4h     \n"
                        "sub    v6.4h, v2.4h, v1.4h     \n"
                        "sub    v7.4h, v3.4h, v1.4h     \n"
                        // save
                        "st1    {v4.4h}, [%4]           \n"
                        "st1    {v5.4h}, [%5]           \n"
                        "st1    {v6.4h}, [%6]           \n"
                        "st1    {v7.4h}, [%7]           \n"
                        : "=r"(r0),      // %0
                          "=r"(r1),      // %1
                          "=r"(r2),      // %2
                          "=r"(r3),      // %3
                          "=r"(out_tm0), // %4
                          "=r"(out_tm1), // %5
                          "=r"(out_tm2), // %6
                          "=r"(out_tm3)  // %7
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "4"(out_tm0),
                          "5"(out_tm1),
                          "6"(out_tm2),
                          "7"(out_tm3)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
                    );
#else
                    // ARMv7 variant of the same transform (vtrn-based transpose)
                    asm volatile(
                        // load
                        "pld        [%0, #64]       \n"
                        "vld1.s8    {d0}, [%0]      \n"
                        "pld        [%1, #64]       \n"
                        "vld1.s8    {d1}, [%1]      \n"
                        "pld        [%2, #64]       \n"
                        "vld1.s8    {d2}, [%2]      \n"
                        "pld        [%3, #64]       \n"
                        "vld1.s8    {d3}, [%3]      \n"
                        // w = B_t * d, trans int8 to int16
                        "vsubl.s8   q2, d0, d2      \n" // d4
                        "vaddl.s8   q3, d1, d2      \n" // d6
                        "vsubl.s8   q4, d2, d1      \n" // d8
                        "vsubl.s8   q5, d3, d1      \n" // d10
                        // transpose w to w_t
                        "vtrn.s16   d4, d6          \n"
                        "vtrn.s16   d8, d10         \n"
                        "vtrn.s32   d4, d8          \n"
                        "vtrn.s32   d6, d10         \n"
                        // U = B_t * d_t
                        "vsub.s16   d11, d4, d8     \n"
                        "vadd.s16   d12, d6, d8     \n"
                        "vsub.s16   d13, d8, d6     \n"
                        "vsub.s16   d14, d10, d6    \n"
                        // save
                        "vst1.s32   {d11}, [%4]     \n"
                        "vst1.s32   {d12}, [%5]     \n"
                        "vst1.s32   {d13}, [%6]     \n"
                        "vst1.s32   {d14}, [%7]     \n"
                        : "=r"(r0),      // %0
                          "=r"(r1),      // %1
                          "=r"(r2),      // %2
                          "=r"(r3),      // %3
                          "=r"(out_tm0), // %4
                          "=r"(out_tm1), // %5
                          "=r"(out_tm2), // %6
                          "=r"(out_tm3)  // %7
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "4"(out_tm0),
                          "5"(out_tm1),
                          "6"(out_tm2),
                          "7"(out_tm3)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
                    );
#endif // __aarch64__
#else
                    // scalar reference path: w = B^T d, transpose, U = B^T w^T
                    short d0[4],d1[4],d2[4],d3[4];
                    short w0[4],w1[4],w2[4],w3[4];
                    short t0[4],t1[4],t2[4],t3[4];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
                    }
                    // U = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm1[n] = d1[n];
                        out_tm2[n] = d2[n];
                        out_tm3[n] = d3[n];
                    }
#endif
                    // step 2 columns = next 2x2 output tile
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in FeatherCNN
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        // 16 int32 accumulators per (tile, outch): 4 tile rows x 4 values
        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r=0; r<4; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            // 8 output channels at a time
            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int p = pp * 8;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);
                int* output4_tm = top_blob_tm.channel(p+4);
                int* output5_tm = top_blob_tm.channel(p+5);
                int* output6_tm = top_blob_tm.channel(p+6);
                int* output7_tm = top_blob_tm.channel(p+7);

                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;
                output4_tm = output4_tm + r*4;
                output5_tm = output5_tm + r*4;
                output6_tm = output6_tm + r*4;
                output7_tm = output7_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    // inch-reduction loop: one 4-wide int16 input vector
                    // multiplied against 8 channels' kernel rows (smlal
                    // widens to int32 accumulators v0..v7)
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"
                        "eor    v1.16b, v1.16b, v1.16b    \n"
                        "eor    v2.16b, v2.16b, v2.16b    \n"
                        "eor    v3.16b, v3.16b, v3.16b    \n"
                        "eor    v4.16b, v4.16b, v4.16b    \n"
                        "eor    v5.16b, v5.16b, v5.16b    \n"
                        "eor    v6.16b, v6.16b, v6.16b    \n"
                        "eor    v7.16b, v7.16b, v7.16b    \n"
                        "mov    w4, %w20                  \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)

                        "prfm   pldl1keep, [%9, #128]     \n"

                        "ld1    {v8.4h}, [%8]             \n" // _r0 = vld1_s16(r0);  // input inch0

                        "ld1    {v9.4h, v10.4h}, [%9]     \n" // _k0 = vld1q_s16(kptr);
                        "add    %9, %9, #16               \n"
                        "ld1    {v11.4h, v12.4h}, [%9]    \n" // _k0n = vld1q_s16(kptr+8);
                        "add    %9, %9, #16               \n"
                        "ld1    {v13.4h, v14.4h}, [%9]    \n" // _k1 = vld1q_s16(kptr+16);
                        "add    %9, %9, #16               \n"
                        "ld1    {v15.4h, v16.4h}, [%9]    \n" // _k1n = vld1q_s16(kptr+24);

                        "add    %8, %8, #8                \n"
                        "add    %9, %9, #16               \n"
                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal  v1.4s, v8.4h, v10.4h      \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal  v2.4s, v8.4h, v11.4h      \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal  v3.4s, v8.4h, v12.4h      \n" // sum3 += (a00-a03) * (k30-k33)
                        "smlal  v4.4s, v8.4h, v13.4h      \n" // sum4 += (a00-a03) * (k40-k43)
                        "smlal  v5.4s, v8.4h, v14.4h      \n" // sum5 += (a00-a03) * (k50-k53)
                        "smlal  v6.4s, v8.4h, v15.4h      \n" // sum6 += (a00-a03) * (k60-k63)
                        "smlal  v7.4s, v8.4h, v16.4h      \n" // sum7 += (a00-a03) * (k70-k73)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory
                        "st1    {v1.4s}, [%1]             \n" //
                        "st1    {v2.4s}, [%2]             \n" //
                        "st1    {v3.4s}, [%3]             \n" //
                        "st1    {v4.4s}, [%4]             \n" //
                        "st1    {v5.4s}, [%5]             \n" //
                        "st1    {v6.4s}, [%6]             \n" //
                        "st1    {v7.4s}, [%7]             \n" //

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(output4_tm), // %4
                          "=r"(output5_tm), // %5
                          "=r"(output6_tm), // %6
                          "=r"(output7_tm), // %7
                          "=r"(r0),         // %8
                          "=r"(kptr)        // %9
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(output4_tm),
                          "5"(output5_tm),
                          "6"(output6_tm),
                          "7"(output7_tm),
                          "8"(r0),
                          "9"(kptr),
                          "r"(inch)         // %20
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"
                    );
#else
                    // ARMv7: same reduction with q0..q7 accumulators
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0              \n"
                        "vmov.s32   q1, #0              \n"
                        "vmov.s32   q2, #0              \n"
                        "vmov.s32   q3, #0              \n"
                        "vmov.s32   q4, #0              \n"
                        "vmov.s32   q5, #0              \n"
                        "vmov.s32   q6, #0              \n"
                        "vmov.s32   q7, #0              \n"
                        "mov        r4, %20             \n"

                        "0:                             \n" // for (int q=0; q<inch; q++)

                        "vld1.s16   {d16}, [%8]!        \n" // _r0 = vld1_s16(r0);  // input inch0

                        "vld1.s16   {d18-d19}, [%9]     \n" // _k0 = vld1q_s16(kptr);
                        "add        %9, #16             \n"
                        "vld1.s16   {d20-d21}, [%9]     \n" // _k0n = vld1q_s16(kptr+8);
                        "add        %9, #16             \n"
                        "vld1.s16   {d22-d23}, [%9]     \n" // _k1 = vld1q_s16(kptr+16);
                        "add        %9, #16             \n"
                        "vld1.s16   {d24-d25}, [%9]     \n" // _k1n = vld1q_s16(kptr+24);
                        "add        %9, #16             \n"

                        "vmlal.s16  q0, d16, d18        \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16  q1, d16, d19        \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16  q2, d16, d20        \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16  q3, d16, d21        \n" // sum3 += (a00-a03) * (k30-k33)
                        "vmlal.s16  q4, d16, d22        \n" // sum4 += (a00-a03) * (k40-k43)
                        "vmlal.s16  q5, d16, d23        \n" // sum5 += (a00-a03) * (k50-k53)
                        "vmlal.s16  q6, d16, d24        \n" // sum6 += (a00-a03) * (k60-k63)
                        "vmlal.s16  q7, d16, d25        \n" // sum7 += (a00-a03) * (k70-k73)

                        "subs       r4, r4, #1          \n"
                        "bne        0b                  \n" // end for

                        "vst1.s32   {d0-d1}, [%0]       \n" // store the result to memory
                        "vst1.s32   {d2-d3}, [%1]       \n"
                        "vst1.s32   {d4-d5}, [%2]       \n"
                        "vst1.s32   {d6-d7}, [%3]       \n"
                        "vst1.s32   {d8-d9}, [%4]       \n"
                        "vst1.s32   {d10-d11}, [%5]     \n"
                        "vst1.s32   {d12-d13}, [%6]     \n"
                        "vst1.s32   {d14-d15}, [%7]     \n"

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(output4_tm), // %4
                          "=r"(output5_tm), // %5
                          "=r"(output6_tm), // %6
                          "=r"(output7_tm), // %7
                          "=r"(r0),         // %8
                          "=r"(kptr)        // %9
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(output4_tm),
                          "5"(output5_tm),
                          "6"(output6_tm),
                          "7"(output7_tm),
                          "8"(r0),
                          "9"(kptr),
                          "r"(inch)         // %20
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};
                    int sum4[4] = {0};
                    int sum5[4] = {0};
                    int sum6[4] = {0};
                    int sum7[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                            sum4[n] += (int)r0[n] * kptr[n+16];
                            sum5[n] += (int)r0[n] * kptr[n+20];
                            sum6[n] += (int)r0[n] * kptr[n+24];
                            sum7[n] += (int)r0[n] * kptr[n+28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 16;
                    output1_tm += 16;
                    output2_tm += 16;
                    output3_tm += 16;
                    output4_tm += 16;
                    output5_tm += 16;
                    output6_tm += 16;
                    output7_tm += 16;
                }
            }

            // 4 output channels at a time
            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);

                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"
                        "eor    v1.16b, v1.16b, v1.16b    \n"
                        "eor    v2.16b, v2.16b, v2.16b    \n"
                        "eor    v3.16b, v3.16b, v3.16b    \n"
                        "mov    w4, %w12                  \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)

                        "prfm   pldl1keep, [%5, #128]     \n"

                        "ld1    {v8.4h}, [%4]             \n" // _r0 = vld1_s16(r0);  // input inch0

                        "ld1    {v9.4h, v10.4h}, [%5]     \n" // _k0 = vld1q_s16(kptr);
                        "add    %5, %5, #16               \n"
                        "ld1    {v11.4h, v12.4h}, [%5]    \n" // _k0n = vld1q_s16(kptr+8);

                        "add    %4, %4, #8                \n"
                        "add    %5, %5, #16               \n"
                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal  v1.4s, v8.4h, v10.4h      \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal  v2.4s, v8.4h, v11.4h      \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal  v3.4s, v8.4h, v12.4h      \n" // sum3 += (a00-a03) * (k30-k33)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory
                        "st1    {v1.4s}, [%1]             \n" //
                        "st1    {v2.4s}, [%2]             \n" //
                        "st1    {v3.4s}, [%3]             \n" //

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(r0),         // %4
                          "=r"(kptr)        // %5
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(r0),
                          "5"(kptr),
                          "r"(inch)         // %12
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0              \n"
                        "vmov.s32   q1, #0              \n"
                        "vmov.s32   q2, #0              \n"
                        "vmov.s32   q3, #0              \n"
                        "mov        r4, %12             \n"

                        "0:                             \n" // for (int q=0; q<inch; q++)

                        "vld1.s16   {d16}, [%4]!        \n" // _r0 = vld1_s16(r0);  // input inch0

                        "vld1.s16   {d18-d19}, [%5]     \n" // _k0 = vld1q_s16(kptr);
                        "add        %5, #16             \n"
                        "vld1.s16   {d20-d21}, [%5]     \n" // _k0n = vld1q_s16(kptr+8);
                        "add        %5, #16             \n"

                        "vmlal.s16  q0, d16, d18        \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16  q1, d16, d19        \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16  q2, d16, d20        \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16  q3, d16, d21        \n" // sum3 += (a00-a03) * (k30-k33)

                        "subs       r4, r4, #1          \n"
                        "bne        0b                  \n" // end for

                        "vst1.s32   {d0-d1}, [%0]       \n" // store the result to memory
                        "vst1.s32   {d2-d3}, [%1]       \n"
                        "vst1.s32   {d4-d5}, [%2]       \n"
                        "vst1.s32   {d6-d7}, [%3]       \n"

                        : "=r"(output0_tm), // %0
                          "=r"(output1_tm), // %1
                          "=r"(output2_tm), // %2
                          "=r"(output3_tm), // %3
                          "=r"(r0),         // %4
                          "=r"(kptr)        // %5
                        : "0"(output0_tm),
                          "1"(output1_tm),
                          "2"(output2_tm),
                          "3"(output3_tm),
                          "4"(r0),
                          "5"(kptr),
                          "r"(inch)         // %12
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 16;
                    output1_tm += 16;
                    output2_tm += 16;
                    output3_tm += 16;
                }
            }

            remain_outch_start += nn_outch << 2;

            // remaining single output channels
            for (int p=remain_outch_start; p<outch; p++)
            {
                int* output0_tm = top_blob_tm.channel(p);

                output0_tm = output0_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor    v0.16b, v0.16b, v0.16b    \n"
                        "mov    w4, %w6                   \n"

                        "0:                               \n" // for (int q=0; q<inch; q++)

                        //"prfm   pldl1keep, [%2, #128]     \n"

                        "ld1    {v8.4h}, [%1]             \n" // _r0 = vld1_s16(r0);  // input inch0
                        "ld1    {v9.4h}, [%2]             \n" // _k0 = vld1q_s16(kptr);

                        "add    %1, %1, #8                \n"
                        "add    %2, %2, #8                \n"
                        "subs   w4, w4, #1                \n"

                        "smlal  v0.4s, v8.4h, v9.4h       \n" // sum0 += (a00-a03) * (k00-k03)

                        "bne    0b                        \n" // end for

                        "st1    {v0.4s}, [%0]             \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                          "=r"(r0),         // %1
                          "=r"(kptr)        // %2
                        : "0"(output0_tm),
                          "1"(r0),
                          "2"(kptr),
                          "r"(inch)         // %6
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32   q0, #0              \n"
                        "mov        r4, %6              \n"

                        "0:                             \n" // for (int q=0; q<inch; q++)

                        "vld1.s16   {d16}, [%1]         \n" // _r0 = vld1_s16(r0);  // input inch0
                        "add        %1, #8              \n"
                        "vld1.s16   {d18}, [%2]         \n" // _k0 = vld1q_s16(kptr);
                        "add        %2, #8              \n"

                        "vmlal.s16  q0, d16, d18        \n" // sum0 += (a00-a03) * (k00-k03)

                        "subs       r4, r4, #1          \n"
                        "bne        0b                  \n" // end for

                        "vst1.s32   {d0-d1}, [%0]       \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                          "=r"(r0),         // %1
                          "=r"(kptr)        // %2
                        : "0"(output0_tm),
                          "1"(r0),
                          "2"(kptr),
                          "r"(inch)         // %6
                        : "cc", "memory", "r4", "q0", "q8", "q9"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif
                    output0_tm += 16;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in FeatherCNN
        int nRowBlocks = w_tm/4;

#if __ARM_NEON
        // arithmetic shift by -2, i.e. >>2: undoes the x2 scaling applied
        // twice by the integer G matrix
        int32x2_t _shift = vdup_n_s32(-2);
#endif

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            int* out_tile = top_blob_tm.channel(p);
            int* outRow0 = top_blob_bordered.channel(p);
            int* outRow1 = outRow0 + outw;

            for (int j=0; j<nColBlocks; j++)
            {
                for(int i=0; i<nRowBlocks; i++)
                {
#if __ARM_NEON
#if __aarch64__
                    // m (4x4 int32) -> 2x2 output via A^T m A, then >>2;
                    // out_tile/outRow pointers advance via post-index
                    asm volatile(
                        "prfm   pldl1keep, [%0, #512]   \n"
                        "ld1    {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"

                        "add    v0.4s, v0.4s, v1.4s     \n" // s0 = s0 + s1 + s2;
                        "sub    v1.4s, v1.4s, v2.4s     \n" // s1 = s1 - s2 + s3;
                        "add    v0.4s, v0.4s, v2.4s     \n"
                        "add    v1.4s, v1.4s, v3.4s     \n"

                        "trn1   v4.4s, v0.4s, v1.4s     \n"
                        "trn2   v5.4s, v0.4s, v1.4s     \n"

                        "dup    v6.2d, v4.d[1]          \n"
                        "dup    v7.2d, v5.d[1]          \n"

                        "add    v0.2s, v4.2s, v5.2s     \n" // o0 = d0 + d1 + d2;
                        "sub    v1.2s, v5.2s, v6.2s     \n" // o1 = d1 - d2 + d3;
                        "add    v0.2s, v0.2s, v6.2s     \n"
                        "add    v1.2s, v1.2s, v7.2s     \n"

                        "sshl   v0.2s, v0.2s, %6.2s     \n" // o0 = o0 >> 2
                        "sshl   v1.2s, v1.2s, %6.2s     \n" // o1 = o1 >> 2

                        "st1    {v0.2s}, [%1], #8       \n"
                        "st1    {v1.2s}, [%2], #8       \n"
                        : "=r"(out_tile), // %0
                          "=r"(outRow0),  // %1
                          "=r"(outRow1)   // %2
                        : "0"(out_tile),
                          "1"(outRow0),
                          "2"(outRow1),
                          "w"(_shift)     // %6
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
                    );
#else
                    // NOTE(review): "vaddq.s32"/"vsubq.s32" are nonstandard
                    // spellings of vadd.s32/vsub.s32 on q registers — kept
                    // as-is since gas accepts them
                    asm volatile(
                        "pld        [%0, #512]          \n"
                        "vldm       %0!, {d0-d7}        \n"

                        "vaddq.s32  q0, q0, q1          \n" // s0 = s0 + s1 + s2;
                        "vsubq.s32  q1, q1, q2          \n" // s1 = s1 - s2 + s3;
                        "vaddq.s32  q0, q0, q2          \n"
                        "vaddq.s32  q1, q1, q3          \n"

                        "vtrn.s32   q0, q1              \n"

                        "vadd.s32   d8, d0, d2          \n" // o0 = d0 + d1 + d2;
                        "vsub.s32   d9, d2, d1          \n" // o1 = d1 - d2 + d3;
                        "vadd.s32   d8, d8, d1          \n"
                        "vadd.s32   d9, d9, d3          \n"

                        "vshl.s32   d8, d8, %P6         \n" // o0 = o0 >> 2
                        "vshl.s32   d9, d9, %P6         \n" // o1 = o1 >> 2

                        "vst1.s32   {d8}, [%1]!         \n"
                        "vst1.s32   {d9}, [%2]!         \n"
                        : "=r"(out_tile), // %0
                          "=r"(outRow0),  // %1
                          "=r"(outRow1)   // %2
                        : "0"(out_tile),
                          "1"(outRow0),
                          "2"(outRow1),
                          "w"(_shift)     // %6
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4"
                    );
#endif // __aarch64__
#else
                    int s0[4],s1[4],s2[4],s3[4];
                    int w0[4],w1[4];
                    int d0[2],d1[2],d2[2],d3[2];
                    int o0[2],o1[2];
                    // load the 4x4 int32 tile
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 4];
                        s2[n] = out_tile[n+ 8];
                        s3[n] = out_tile[n+12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save the 2x2 output; shift right by 2 because G was
                    // scaled by 2 (applied twice: G g G^T)
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    out_tile += 16;
                    outRow0 += 2;
                    outRow1 += 2;
#endif // __ARM_NEON
                }

                outRow0 += outw;
                outRow1 += outw;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Transform the 3x3 int8 kernels into the Winograd F(4,3) domain (one 6x6
// int16 tile per kernel) and repack into the 8/4/1 output-channel layout
// consumed by conv3x3s1_winograd43_int8_neon.  The 36 tile coefficients are
// split into 9 groups of 4 (r = 0..8), one repacked Mat per group.
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    Mat kernel_tm(6*6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    // integer-scaled version of the above (common factor removed; the
    // output transform compensates)
    const short ktm[6][3] = {
        {  6,  0,  0},
        { -4, -4, -4},
        { -4,  4, -4},
        {  1,  2,  4},
        {  1, -2,  4},
        {  0,  0, 24}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g
            short tmp[6][3];
            for (int i=0; i<6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j=0; j<6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<6; i++)
                {
                    kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: 9 groups (r) of 4 consecutive coefficients from each 36-entry
    // tile, interleaved across 8/4/1 output channels.
    for (int r=0; r<9; r++)
    {
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);
            const short* kernel4 = (const short*)kernel_tm.channel(p+4);
            const short* kernel5 = (const short*)kernel_tm.channel(p+5);
            const short* kernel6 = (const short*)kernel_tm.channel(p+6);
            const short* kernel7 = (const short*)kernel_tm.channel(p+7);

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                // 8 channels x 4 coefficients of group r
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];
                ktmp[16] = kernel4[r*4+0];
                ktmp[17] = kernel4[r*4+1];
                ktmp[18] = kernel4[r*4+2];
                ktmp[19] = kernel4[r*4+3];
                ktmp[20] = kernel5[r*4+0];
                ktmp[21] = kernel5[r*4+1];
                ktmp[22] = kernel5[r*4+2];
                ktmp[23] = kernel5[r*4+3];
                ktmp[24] = kernel6[r*4+0];
                ktmp[25] = kernel6[r*4+1];
                ktmp[26] = kernel6[r*4+2];
                ktmp[27] = kernel6[r*4+3];
                ktmp[28] = kernel7[r*4+0];
                ktmp[29] = kernel7[r*4+1];
                ktmp[30] = kernel7[r*4+2];
                ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                // 4 channels x 4 coefficients of group r
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                // 1 channel x 4 coefficients of group r
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Winograd F(4,3) int8 3x3 stride-1 convolution: pad to 4n+2, transform
// 6x6 input tiles, multiply against the repacked transformed kernels, then
// inverse-transform to 4x4 output tiles and crop.
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered,
0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = 
bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t 
_n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; 
t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int 
p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += 
(a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; int* outRow2 = outRow0 + outw * 2; int* outRow3 = outRow0 + outw * 3; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = 
vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; _d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = 
vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = _o0[n] / 576; outRow1[n] = _o1[n] / 576; outRow2[n] = _o2[n] / 576; outRow3[n] = _o3[n] / 576; } #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] / 576; outRow1[n] = o1[n] / 576; outRow2[n] = o2[n] / 576; outRow3[n] = o3[n] / 576; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void 
conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = 
bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { 
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; 
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 
+= 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 
{v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; 
_d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_o1), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_o2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, _out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = 
w3[0]; // completes the assignment "d0[3] = w3[0];" begun on the previous line
// (scalar fallback path of the Winograd F(4,3) output transform: transpose w -> w_t)
d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
    o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
    o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
    o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
    o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
}
// save to top blob tm
// dequantize: int32 accumulator * per-channel scale, plus bias, written as float
for (int n = 0; n < 4; n++)
{
    outRow0[n] = (float)o0[n] * scale0 + bias0;
    outRow1[n] = (float)o1[n] * scale0 + bias0;
    outRow2[n] = (float)o2[n] * scale0 + bias0;
    outRow3[n] = (float)o3[n] * scale0 + bias0;
}
#endif // __ARM_NEON
// advance to the next 6x6 input tile / next 4x4 output tile in this row
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
// each tile wrote 4 output rows; skip over the 3 rows already covered
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Repack the raw 3x3 int8 kernel for conv3x3s2_packed_int8_neon below.
// Output channels are grouped 8 at a time: each packed channel of kernel_tm
// interleaves the 9 taps of 8 consecutive output channels
// (ktmp layout per tap k: k0[k] k1[k] ... k7[k]), so the packed kernel holds
// outch/8 interleaved channels plus one plain channel per leftover output
// channel (hence the "outch/8 + outch%8" channel count).
// The consumer indexes these with channel(p/8) and channel(p/8 + p%8).
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // 8 output channels x 9 taps per row, 1-byte (int8) elements
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    // main loop: pack 8 output channels together
    for (; p+7<outch; p+=8)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            // interleave tap k of the 8 kernels: 8 bytes per tap
            for (int k=0; k<9; k++)
            {
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
ktmp[7] = k7[k]; ktmp += 8; } k0 += 9; k1 += 9; k2 += 9; k3 += 9; k4 += 9; k5 += 9; k6 += 9; k7 += 9; } } for (; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch*9; signed char* ktmp = kernel_tm.channel(p/8 + p%8); for (int q=0; q<inch; q++) { for (int k=0; k<9; k++) { ktmp[k] = k0[k]; } ktmp += 9; k0 += 9; } } } static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p+0); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] 
\n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 
v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 \n"//(k05-k75) "sshll v3.8h, v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal 
v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += 
(r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, 
v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 
{d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! 
\n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& 
opt)
{
    // 3x3 kernel, stride 1: lower directly to the generic im2col + int8 sgemm path.
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, 3, 3, 1, 1, opt);
}

// 3x3 kernel, stride 2, int8: thin wrapper that delegates to the generic
// im2col + int8 sgemm implementation with the kernel/stride geometry fixed.
static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, 3, 3, 2, 2, opt);
}
GB_unaryop__lnot_int8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int8_int32
// op(A') function:  GB_tran__lnot_int8_int32

// C type:   int8_t
// A type:   int32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int32_t

// type of the C matrix entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already cast) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij)); also used by the transpose template below
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) elementwise over the anz entries of Ax, in
// parallel with nthreads OpenMP threads.  Returns GrB_NO_VALUE when the
// operator/type combination is disabled at compile time.
GrB_Info GB_unop__lnot_int8_int32
(
    int8_t *restrict Cx,            // output array, anz entries
    const int32_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the transpose: the actual work is done by the included template
// GB_unaryop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB_tran__lnot_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
    GBI_single_iterator Iter,       // iterator over the slices of A
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
app.c
/**
 * SpMV host application for UPMEM PIM: partitions a sparse matrix across
 * DPUs, transfers the operands, launches the DPU kernel, and merges the
 * partial results on the host.
 *
 * Christina Giannoula
 * cgiannoula: christina.giann@gmail.com
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>

#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"

// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif

#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB

/*
 * Main Structures:
 * 1. Matrices (A: blocked format sent to DPUs, B/C: intermediate formats)
 * 2. Input vector (x)
 * 3. Output vector (z) and per-DPU partial results (y)
 * 4. Help structures for data partitioning (part_info)
 */
static struct RBDBCSRMatrix* A;   // rank-based distributed blocked CSR matrix (DPU layout)
static struct RBDCSRMatrix* B;    // intermediate CSR-like format, freed after conversion
static struct COOMatrix* C;       // matrix as read from file, freed after conversion
static val_dt* x;                 // input vector (padded)
static val_dt* y;                 // per-DPU partial output slots (padded per DPU)
static val_dt* z;                 // final merged output vector (padded)
static struct partition_info_t *part_info;

/**
 * @brief Specific information for each DPU
 */
struct dpu_info_t {
    uint32_t block_rows_per_dpu;   // block rows assigned to this DPU
    uint32_t prev_block_rows_dpu;  // first block row of this DPU within its vertical partition
    uint32_t cols_per_dpu;         // columns (tile width) seen by this DPU
    uint32_t block_start;          // NOTE(review): never assigned in this file — confirm if unused
    uint32_t blocks;               // nonzero blocks assigned to this DPU
    uint32_t blocks_pad;           // blocks rounded up to an even count (8-byte MRAM alignment)
    uint32_t prev_blocks_dpu;      // running total of blocks before this DPU
    uint32_t ptr_offset;           // offset into A->browptr for this DPU's row-pointer slice
    uint32_t merge;                // NOTE(review): never assigned in this file — confirm if unused
};
struct dpu_info_t *dpu_info;

/**
 * @brief Split the available DPUs into a 2D grid of partitions.
 * @param n total number of DPUs
 * @param horz_partitions output: DPUs per vertical partition (horizontal split)
 * @param vert_partitions number of vertical (column) partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    uint32_t dpus_per_vert_partition = n / vert_partitions;
    *horz_partitions = dpus_per_vert_partition;
}

/**
 * @brief Initialize the input vector with a repeating 1..4 pattern.
 * @param vec pointer to the input vector
 * @param size number of elements to initialize
 */
void init_vector(val_dt* vec, uint32_t size) {
    for(unsigned int i = 0; i < size; ++i) {
        vec[i] = (val_dt) (i%4+1);
    }
}

/**
 * @brief Reference SpMV on the host CPU (y += A * x), used to verify the
 * DPU results.  Iterates the vertical partitions of the blocked matrix;
 * bcolind/bval are indexed with a running block total because each vertical
 * partition stores its blocks contiguously.
 */
static void spmv_host(val_dt* y, struct RBDBCSRMatrix *A, val_dt* x) {
    uint64_t total_blocks = 0;
    for (uint32_t c = 0; c < A->vert_partitions; c++) {
        // browptr holds one (num_block_rows + 1)-long row-pointer array per vertical partition
        uint32_t ptr_offset = c * (A->num_block_rows + 1);
        // each vertical partition covers a tile_width-wide slice of x
        uint32_t col_offset = c * A->tile_width;
        for(uint64_t n=0; n < A->num_block_rows; n++) {
            for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){
                uint64_t j = A->bcolind[total_blocks + i];   // block-column index
                for(uint64_t blr=0; blr < A->row_block_size; blr++){
                    val_dt acc = 0;
                    for(uint64_t blc=0; blc < A->col_block_size; blc++) {
                        // dense row-major block of size row_block_size x col_block_size
                        acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[col_offset + j * A->col_block_size + blc];
                    }
                    y[n * A->row_block_size + blr] += acc;
                }
            }
        }
        total_blocks += A->blocks_per_vert_partition[c];
    }
}

/**
 * @brief main of the host application: read matrix, partition, transfer to
 * DPUs, launch the kernel, retrieve and merge partial results, and
 * (optionally) verify against the host reference.
 */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);

    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    uint32_t nr_of_ranks;

    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);

    unsigned int i;

    // Initialize input data: COO -> RBDCSR -> RBDBCSR conversion chain,
    // freeing each intermediate representation as soon as it is converted.
    C = readCOOMatrix(p.fileName);
    sortCOOMatrix(C);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    B = coo2rbdcsr(C, horz_partitions, vert_partitions);
    freeCOOMatrix(C);
    A = rbdcsr2rbdbcsr(B, p.row_blsize, p.col_blsize);
    countNNZperBlockRBDBCSRMatrix(A);
    freeRBDCSRMatrix(B);

    // Initialize partition data
    part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);

#if FG_TRANS
    // Record how many DPUs live in each rank and build a prefix sum
    // (accum_dpus_ranks) so a rank index maps to its first global DPU index.
    struct dpu_set_t rank;
    uint32_t each_rank;
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
    }
    int sum = 0;
    for(int i=0; i < p.max_nranks+1; i++) {   // note: shadows the outer `i`
        part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
        sum += part_info->active_dpus_per_rank[i];
    }
#endif

    // Initialize help data - Padding needed so every transfer size is a
    // multiple of 8 bytes (MRAM transfer granularity): 8 / byte_dt elements.
    uint32_t ncols_pad = A->ncols + A->tile_width + A->col_block_size;
    uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
    uint32_t nrows_pad = A->nrows + A->row_block_size;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
    if (tile_width_pad % 2 != 0)
        tile_width_pad++;
#endif
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));

    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
    // Allocate output vector
    z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);

    // Load-balance nnzs (at block-row granularity) across DPUs of the same vertical partition
    partition_by_nnz(A, part_info);

    // Initialize help data
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers: all DPUs receive equally-sized
    // (max-padded) regions so one parallel xfer can serve the whole set.
    uint64_t max_block_rows_per_dpu = 0;
    uint64_t max_blocks_per_dpu = 0;

    // Timer for measurements
    Timer timer;

    i = 0;
    uint32_t acc_blocks = 0;
    uint32_t total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for block rows and non-zero elements needed for CPU-DPU transfers.
        // DPU i handles horizontal slice (tile_horz_indx) of vertical partition (tile_vert_indx);
        // brow_split stores (horz_partitions + 1) boundaries per vertical partition.
        uint32_t tile_horz_indx = i % A->horz_partitions;
        uint32_t tile_vert_indx = i / A->horz_partitions;
        uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        uint32_t block_rows_per_dpu_pad = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx] + 1;
        uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        if (block_rows_per_dpu_pad > max_block_rows_per_dpu)
            max_block_rows_per_dpu = block_rows_per_dpu_pad;

        unsigned int blocks, blocks_pad;
        blocks = A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu + block_rows_per_dpu] - A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu];
        assert(blocks == part_info->blocks_dpu[i]);
        if (blocks % 2 != 0) // bcolind: pad to an even count for 8-byte alignment
            blocks_pad = blocks + 1;
        else
            blocks_pad = blocks;

        if (blocks_pad > max_blocks_per_dpu)
            max_blocks_per_dpu = blocks_pad;

        // Keep information per DPU
        dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
        dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
        dpu_info[i].cols_per_dpu = A->tile_width;
        dpu_info[i].blocks = blocks;
        dpu_info[i].blocks_pad = blocks_pad;
        dpu_info[i].prev_blocks_dpu = total_blocks;
        dpu_info[i].ptr_offset = tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu;

        // Find input arguments per DPU
        input_args[i].block_rows = block_rows_per_dpu;
        input_args[i].tcols = tile_width_pad;
        input_args[i].row_block_size = A->row_block_size;
        input_args[i].col_block_size = A->col_block_size;

#if BLNC_TSKLT_BLOCK
        // Load-balance blocks across tasklets
        partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#endif

        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Find input arguments per tasklet
            input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t];
            input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)];
        }

        // After the last horizontal slice of a vertical partition, advance the
        // accumulated block counter to the next partition's block region.
        if (tile_horz_indx == (A->horz_partitions - 1))
            acc_blocks += A->blocks_per_vert_partition[tile_vert_indx];
        total_blocks += part_info->blocks_dpu[i];
    }

#if FG_TRANS
    // Find max number of block rows (subset of elements of the output vector) among DPUs of each rank
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t max_block_rows_cur_rank = 0;
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
        for (uint32_t k = 0; k < nr_dpus_in_rank; k++) {
            if (start_dpu + k >= nr_of_dpus)
                break;
            if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank)
                max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu;
        }
        if (max_block_rows_cur_rank % 2 != 0)   // keep transfer size 8-byte aligned
            max_block_rows_cur_rank++;
        part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank;
    }
#endif

    // Initializations for parallel transfers with padding needed
    if (max_block_rows_per_dpu % 2 != 0)
        max_block_rows_per_dpu++;
    if (max_blocks_per_dpu % 2 != 0)
        max_blocks_per_dpu++;

    // Re-allocations for padding needed: grow the arrays so each DPU's slice
    // can be transferred with the common (max-padded) size.
    A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bcolind = (uint32_t *) realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
    y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));

    // Count total number of bytes to be transfered in MRAM of DPU.
    // MRAM layout per DPU (in offset order used below):
    //   [output y][input x slice][browptr][bcolind][bval]
    unsigned long int total_bytes;
    total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
    assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");

    // Copy input arguments to DPUs
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        input_args[i].max_block_rows = max_block_rows_per_dpu;
        input_args[i].max_blocks = max_blocks_per_dpu;
        DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));

    // Copy input matrix to DPUs
    startTimer(&timer, 0);
    // Copy Browptr (placed after the y and x regions in MRAM)
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + dpu_info[i].ptr_offset));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));

    // Copy Bcolind
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks));
        total_blocks += part_info->blocks_dpu[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));

    // Copy Bvalues
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
        total_blocks += part_info->blocks_dpu[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 0);

    // Copy input vector to DPUs (each vertical partition gets its x slice)
    startTimer(&timer, 1);
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i / A->horz_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 1);

    // Run kernel on DPUs
    startTimer(&timer, 2);
    DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
    stopTimer(&timer, 2);

#if LOG
    // Display DPU Log (default: disabled)
    DPU_FOREACH(dpu_set, dpu) {
        DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
    }
#endif

    // Retrieve results for output vector from DPUs
    startTimer(&timer, 3);
#if CG_TRANS
    // Coarse-grained data transfers in the output vector
    i = 0;
    uint32_t block_rows_footprint = 0;   // NOTE(review): unused in this path
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
    // Fine-grained data transfers in the output vector at rank granularity
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    i = 0;
    //struct dpu_set_t rank;
    DPU_RANK_FOREACH(dpu_set, rank) {
        DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC));
        i++;
    }
    DPU_ASSERT(dpu_sync(dpu_set));
#endif
    stopTimer(&timer, 3);

    // Merge partial results to the host CPU: DPUs within the same vertical
    // partition hold partial sums for disjoint block-row ranges, and results
    // of corresponding rows across vertical partitions are accumulated into z.
    startTimer(&timer, 4);
    uint32_t r, c, t, b;
    for (c = 0; c < A->vert_partitions; c++) {
        for (r = 0; r < A->horz_partitions; r++) {
#pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, r, c) private(t, b)
            for (t = 0; t < part_info->brow_split[c * (A->horz_partitions + 1) + r+1] - part_info->brow_split[c * (A->horz_partitions + 1) + r]; t++) {
                for (b = 0; b < A->row_block_size; b++) {
                    z[(part_info->brow_split[c * (A->horz_partitions + 1) + r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b];
                }
            }
        }
    }
    stopTimer(&timer, 4);

    // Print timing results
    printf("\n");
    printf("Load Matrix ");
    printTimer(&timer, 0);
    printf("Load Input Vector ");
    printTimer(&timer, 1);
    printf("Kernel ");
    printTimer(&timer, 2);
    printf("Retrieve Output Vector ");
    printTimer(&timer, 3);
    printf("Merge Partial Results ");
    printTimer(&timer, 4);
    printf("\n\n");

#if CHECK_CORR
    // Check output against the host reference SpMV.
    // NOTE(review): this startTimer(4) has no matching stopTimer and the
    // timing was already printed above — looks like leftover code; confirm.
    startTimer(&timer, 4);
    val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    spmv_host(y_host, A, x);

    bool status = true;
    i = 0;
    for (i = 0; i < A->nrows; i++) {
        if(y_host[i] != z[i]) {
            status = false;
        }
    }
    if (status) {
        printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
    } else {
        printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
    }
    free(y_host);
#endif

    // Deallocation
    // NOTE(review): dpu_info and input_args are not freed — leak at exit; confirm intent.
    freeRBDBCSRMatrix(A);
    free(x);
    free(y);
    free(z);
    partition_free(part_info);
    DPU_ASSERT(dpu_free(dpu_set));

    return 0;
}
CRC64.h
/*
 * Copyright (C) 2015, UChicago Argonne, LLC
 * All Rights Reserved
 *
 * Generic IO (ANL-15-066)
 * Hal Finkel, Argonne National Laboratory
 *
 * OPEN SOURCE LICENSE
 *
 * Under the terms of Contract No. DE-AC02-06CH11357 with UChicago Argonne,
 * LLC, the U.S. Government retains certain rights in this software.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the names of UChicago Argonne, LLC or the Department of Energy
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * *****************************************************************************
 *
 * DISCLAIMER
 * THE SOFTWARE IS SUPPLIED “AS IS” WITHOUT WARRANTY OF ANY KIND. NEITHER THE
 * UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT OF ENERGY, NOR
 * UCHICAGO ARGONNE, LLC, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY,
 * EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE
 * ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, DATA, APPARATUS,
 * PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE
 * PRIVATELY OWNED RIGHTS.
* * ***************************************************************************** */ #ifndef CRC64_H #define CRC64_H #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #include <cstdlib> #include <stdint.h> #ifdef _OPENMP #include <omp.h> #endif // These functions compute the CRC-64 checksum on a block of data // and provide a way to combine the checksums on two blocks of data. // For more information, see: // http://en.wikipedia.org/wiki/Computation_of_CRC // http://checksumcrc.blogspot.com/2011/12/should-you-use-crc-or-checksum.html // http://crcutil.googlecode.com/files/crc-doc.1.0.pdf // http://www.ross.net/crc/download/crc_v3.txt // This uses the ECMA-182 polynomial with -1 initialization, and computes // the bit-reversed CRC. // The polynomial here is the bit-reversed encoding of 0x42f0e1eba9ea3693. static const uint64_t crc64_poly = UINT64_C(0xc96c5795d7870f42); static const uint64_t crc64_table[4][256] ={ { UINT64_C(0x0000000000000000), UINT64_C(0x1dee8a5e222ca1dc), UINT64_C(0x3bdd14bc445943b8), UINT64_C(0x26339ee26675e264), UINT64_C(0x77ba297888b28770), UINT64_C(0x6a54a326aa9e26ac), UINT64_C(0x4c673dc4ccebc4c8), UINT64_C(0x5189b79aeec76514), UINT64_C(0xef7452f111650ee0), UINT64_C(0xf29ad8af3349af3c), UINT64_C(0xd4a9464d553c4d58), UINT64_C(0xc947cc137710ec84), UINT64_C(0x98ce7b8999d78990), UINT64_C(0x8520f1d7bbfb284c), UINT64_C(0xa3136f35dd8eca28), UINT64_C(0xbefde56bffa26bf4), UINT64_C(0x4c300ac98dc40345), UINT64_C(0x51de8097afe8a299), UINT64_C(0x77ed1e75c99d40fd), UINT64_C(0x6a03942bebb1e121), UINT64_C(0x3b8a23b105768435), UINT64_C(0x2664a9ef275a25e9), UINT64_C(0x0057370d412fc78d), UINT64_C(0x1db9bd5363036651), UINT64_C(0xa34458389ca10da5), UINT64_C(0xbeaad266be8dac79), UINT64_C(0x98994c84d8f84e1d), UINT64_C(0x8577c6dafad4efc1), UINT64_C(0xd4fe714014138ad5), UINT64_C(0xc910fb1e363f2b09), UINT64_C(0xef2365fc504ac96d), UINT64_C(0xf2cdefa2726668b1), UINT64_C(0x986015931b88068a), UINT64_C(0x858e9fcd39a4a756), UINT64_C(0xa3bd012f5fd14532), 
UINT64_C(0xbe538b717dfde4ee), UINT64_C(0xefda3ceb933a81fa), UINT64_C(0xf234b6b5b1162026), UINT64_C(0xd4072857d763c242), UINT64_C(0xc9e9a209f54f639e), UINT64_C(0x771447620aed086a), UINT64_C(0x6afacd3c28c1a9b6), UINT64_C(0x4cc953de4eb44bd2), UINT64_C(0x5127d9806c98ea0e), UINT64_C(0x00ae6e1a825f8f1a), UINT64_C(0x1d40e444a0732ec6), UINT64_C(0x3b737aa6c606cca2), UINT64_C(0x269df0f8e42a6d7e), UINT64_C(0xd4501f5a964c05cf), UINT64_C(0xc9be9504b460a413), UINT64_C(0xef8d0be6d2154677), UINT64_C(0xf26381b8f039e7ab), UINT64_C(0xa3ea36221efe82bf), UINT64_C(0xbe04bc7c3cd22363), UINT64_C(0x9837229e5aa7c107), UINT64_C(0x85d9a8c0788b60db), UINT64_C(0x3b244dab87290b2f), UINT64_C(0x26cac7f5a505aaf3), UINT64_C(0x00f95917c3704897), UINT64_C(0x1d17d349e15ce94b), UINT64_C(0x4c9e64d30f9b8c5f), UINT64_C(0x5170ee8d2db72d83), UINT64_C(0x7743706f4bc2cfe7), UINT64_C(0x6aadfa3169ee6e3b), UINT64_C(0xa218840d981e1391), UINT64_C(0xbff60e53ba32b24d), UINT64_C(0x99c590b1dc475029), UINT64_C(0x842b1aeffe6bf1f5), UINT64_C(0xd5a2ad7510ac94e1), UINT64_C(0xc84c272b3280353d), UINT64_C(0xee7fb9c954f5d759), UINT64_C(0xf391339776d97685), UINT64_C(0x4d6cd6fc897b1d71), UINT64_C(0x50825ca2ab57bcad), UINT64_C(0x76b1c240cd225ec9), UINT64_C(0x6b5f481eef0eff15), UINT64_C(0x3ad6ff8401c99a01), UINT64_C(0x273875da23e53bdd), UINT64_C(0x010beb384590d9b9), UINT64_C(0x1ce5616667bc7865), UINT64_C(0xee288ec415da10d4), UINT64_C(0xf3c6049a37f6b108), UINT64_C(0xd5f59a785183536c), UINT64_C(0xc81b102673aff2b0), UINT64_C(0x9992a7bc9d6897a4), UINT64_C(0x847c2de2bf443678), UINT64_C(0xa24fb300d931d41c), UINT64_C(0xbfa1395efb1d75c0), UINT64_C(0x015cdc3504bf1e34), UINT64_C(0x1cb2566b2693bfe8), UINT64_C(0x3a81c88940e65d8c), UINT64_C(0x276f42d762cafc50), UINT64_C(0x76e6f54d8c0d9944), UINT64_C(0x6b087f13ae213898), UINT64_C(0x4d3be1f1c854dafc), UINT64_C(0x50d56bafea787b20), UINT64_C(0x3a78919e8396151b), UINT64_C(0x27961bc0a1bab4c7), UINT64_C(0x01a58522c7cf56a3), UINT64_C(0x1c4b0f7ce5e3f77f), UINT64_C(0x4dc2b8e60b24926b), 
UINT64_C(0x502c32b8290833b7), UINT64_C(0x761fac5a4f7dd1d3), UINT64_C(0x6bf126046d51700f), UINT64_C(0xd50cc36f92f31bfb), UINT64_C(0xc8e24931b0dfba27), UINT64_C(0xeed1d7d3d6aa5843), UINT64_C(0xf33f5d8df486f99f), UINT64_C(0xa2b6ea171a419c8b), UINT64_C(0xbf586049386d3d57), UINT64_C(0x996bfeab5e18df33), UINT64_C(0x848574f57c347eef), UINT64_C(0x76489b570e52165e), UINT64_C(0x6ba611092c7eb782), UINT64_C(0x4d958feb4a0b55e6), UINT64_C(0x507b05b56827f43a), UINT64_C(0x01f2b22f86e0912e), UINT64_C(0x1c1c3871a4cc30f2), UINT64_C(0x3a2fa693c2b9d296), UINT64_C(0x27c12ccde095734a), UINT64_C(0x993cc9a61f3718be), UINT64_C(0x84d243f83d1bb962), UINT64_C(0xa2e1dd1a5b6e5b06), UINT64_C(0xbf0f57447942fada), UINT64_C(0xee86e0de97859fce), UINT64_C(0xf3686a80b5a93e12), UINT64_C(0xd55bf462d3dcdc76), UINT64_C(0xc8b57e3cf1f07daa), UINT64_C(0xd6e9a7309f3239a7), UINT64_C(0xcb072d6ebd1e987b), UINT64_C(0xed34b38cdb6b7a1f), UINT64_C(0xf0da39d2f947dbc3), UINT64_C(0xa1538e481780bed7), UINT64_C(0xbcbd041635ac1f0b), UINT64_C(0x9a8e9af453d9fd6f), UINT64_C(0x876010aa71f55cb3), UINT64_C(0x399df5c18e573747), UINT64_C(0x24737f9fac7b969b), UINT64_C(0x0240e17dca0e74ff), UINT64_C(0x1fae6b23e822d523), UINT64_C(0x4e27dcb906e5b037), UINT64_C(0x53c956e724c911eb), UINT64_C(0x75fac80542bcf38f), UINT64_C(0x6814425b60905253), UINT64_C(0x9ad9adf912f63ae2), UINT64_C(0x873727a730da9b3e), UINT64_C(0xa104b94556af795a), UINT64_C(0xbcea331b7483d886), UINT64_C(0xed6384819a44bd92), UINT64_C(0xf08d0edfb8681c4e), UINT64_C(0xd6be903dde1dfe2a), UINT64_C(0xcb501a63fc315ff6), UINT64_C(0x75adff0803933402), UINT64_C(0x6843755621bf95de), UINT64_C(0x4e70ebb447ca77ba), UINT64_C(0x539e61ea65e6d666), UINT64_C(0x0217d6708b21b372), UINT64_C(0x1ff95c2ea90d12ae), UINT64_C(0x39cac2cccf78f0ca), UINT64_C(0x24244892ed545116), UINT64_C(0x4e89b2a384ba3f2d), UINT64_C(0x536738fda6969ef1), UINT64_C(0x7554a61fc0e37c95), UINT64_C(0x68ba2c41e2cfdd49), UINT64_C(0x39339bdb0c08b85d), UINT64_C(0x24dd11852e241981), UINT64_C(0x02ee8f674851fbe5), 
UINT64_C(0x1f0005396a7d5a39), UINT64_C(0xa1fde05295df31cd), UINT64_C(0xbc136a0cb7f39011), UINT64_C(0x9a20f4eed1867275), UINT64_C(0x87ce7eb0f3aad3a9), UINT64_C(0xd647c92a1d6db6bd), UINT64_C(0xcba943743f411761), UINT64_C(0xed9add965934f505), UINT64_C(0xf07457c87b1854d9), UINT64_C(0x02b9b86a097e3c68), UINT64_C(0x1f5732342b529db4), UINT64_C(0x3964acd64d277fd0), UINT64_C(0x248a26886f0bde0c), UINT64_C(0x7503911281ccbb18), UINT64_C(0x68ed1b4ca3e01ac4), UINT64_C(0x4ede85aec595f8a0), UINT64_C(0x53300ff0e7b9597c), UINT64_C(0xedcdea9b181b3288), UINT64_C(0xf02360c53a379354), UINT64_C(0xd610fe275c427130), UINT64_C(0xcbfe74797e6ed0ec), UINT64_C(0x9a77c3e390a9b5f8), UINT64_C(0x879949bdb2851424), UINT64_C(0xa1aad75fd4f0f640), UINT64_C(0xbc445d01f6dc579c), UINT64_C(0x74f1233d072c2a36), UINT64_C(0x691fa96325008bea), UINT64_C(0x4f2c37814375698e), UINT64_C(0x52c2bddf6159c852), UINT64_C(0x034b0a458f9ead46), UINT64_C(0x1ea5801badb20c9a), UINT64_C(0x38961ef9cbc7eefe), UINT64_C(0x257894a7e9eb4f22), UINT64_C(0x9b8571cc164924d6), UINT64_C(0x866bfb923465850a), UINT64_C(0xa05865705210676e), UINT64_C(0xbdb6ef2e703cc6b2), UINT64_C(0xec3f58b49efba3a6), UINT64_C(0xf1d1d2eabcd7027a), UINT64_C(0xd7e24c08daa2e01e), UINT64_C(0xca0cc656f88e41c2), UINT64_C(0x38c129f48ae82973), UINT64_C(0x252fa3aaa8c488af), UINT64_C(0x031c3d48ceb16acb), UINT64_C(0x1ef2b716ec9dcb17), UINT64_C(0x4f7b008c025aae03), UINT64_C(0x52958ad220760fdf), UINT64_C(0x74a614304603edbb), UINT64_C(0x69489e6e642f4c67), UINT64_C(0xd7b57b059b8d2793), UINT64_C(0xca5bf15bb9a1864f), UINT64_C(0xec686fb9dfd4642b), UINT64_C(0xf186e5e7fdf8c5f7), UINT64_C(0xa00f527d133fa0e3), UINT64_C(0xbde1d8233113013f), UINT64_C(0x9bd246c15766e35b), UINT64_C(0x863ccc9f754a4287), UINT64_C(0xec9136ae1ca42cbc), UINT64_C(0xf17fbcf03e888d60), UINT64_C(0xd74c221258fd6f04), UINT64_C(0xcaa2a84c7ad1ced8), UINT64_C(0x9b2b1fd69416abcc), UINT64_C(0x86c59588b63a0a10), UINT64_C(0xa0f60b6ad04fe874), UINT64_C(0xbd188134f26349a8), UINT64_C(0x03e5645f0dc1225c), 
UINT64_C(0x1e0bee012fed8380), UINT64_C(0x383870e3499861e4), UINT64_C(0x25d6fabd6bb4c038), UINT64_C(0x745f4d278573a52c), UINT64_C(0x69b1c779a75f04f0), UINT64_C(0x4f82599bc12ae694), UINT64_C(0x526cd3c5e3064748), UINT64_C(0xa0a13c6791602ff9), UINT64_C(0xbd4fb639b34c8e25), UINT64_C(0x9b7c28dbd5396c41), UINT64_C(0x8692a285f715cd9d), UINT64_C(0xd71b151f19d2a889), UINT64_C(0xcaf59f413bfe0955), UINT64_C(0xecc601a35d8beb31), UINT64_C(0xf1288bfd7fa74aed), UINT64_C(0x4fd56e9680052119), UINT64_C(0x523be4c8a22980c5), UINT64_C(0x74087a2ac45c62a1), UINT64_C(0x69e6f074e670c37d), UINT64_C(0x386f47ee08b7a669), UINT64_C(0x2581cdb02a9b07b5), UINT64_C(0x03b253524ceee5d1), UINT64_C(0x1e5cd90c6ec2440d) }, { UINT64_C(0x0000000000000000), UINT64_C(0x3f0be14a916a6dcb), UINT64_C(0x7e17c29522d4db96), UINT64_C(0x411c23dfb3beb65d), UINT64_C(0xfc2f852a45a9b72c), UINT64_C(0xc3246460d4c3dae7), UINT64_C(0x823847bf677d6cba), UINT64_C(0xbd33a6f5f6170171), UINT64_C(0x6a87a57f245d70dd), UINT64_C(0x558c4435b5371d16), UINT64_C(0x149067ea0689ab4b), UINT64_C(0x2b9b86a097e3c680), UINT64_C(0x96a8205561f4c7f1), UINT64_C(0xa9a3c11ff09eaa3a), UINT64_C(0xe8bfe2c043201c67), UINT64_C(0xd7b4038ad24a71ac), UINT64_C(0xd50f4afe48bae1ba), UINT64_C(0xea04abb4d9d08c71), UINT64_C(0xab18886b6a6e3a2c), UINT64_C(0x94136921fb0457e7), UINT64_C(0x2920cfd40d135696), UINT64_C(0x162b2e9e9c793b5d), UINT64_C(0x57370d412fc78d00), UINT64_C(0x683cec0bbeade0cb), UINT64_C(0xbf88ef816ce79167), UINT64_C(0x80830ecbfd8dfcac), UINT64_C(0xc19f2d144e334af1), UINT64_C(0xfe94cc5edf59273a), UINT64_C(0x43a76aab294e264b), UINT64_C(0x7cac8be1b8244b80), UINT64_C(0x3db0a83e0b9afddd), UINT64_C(0x02bb49749af09016), UINT64_C(0x38c63ad73e7bddf1), UINT64_C(0x07cddb9daf11b03a), UINT64_C(0x46d1f8421caf0667), UINT64_C(0x79da19088dc56bac), UINT64_C(0xc4e9bffd7bd26add), UINT64_C(0xfbe25eb7eab80716), UINT64_C(0xbafe7d685906b14b), UINT64_C(0x85f59c22c86cdc80), UINT64_C(0x52419fa81a26ad2c), UINT64_C(0x6d4a7ee28b4cc0e7), UINT64_C(0x2c565d3d38f276ba), 
UINT64_C(0x135dbc77a9981b71), UINT64_C(0xae6e1a825f8f1a00), UINT64_C(0x9165fbc8cee577cb), UINT64_C(0xd079d8177d5bc196), UINT64_C(0xef72395dec31ac5d), UINT64_C(0xedc9702976c13c4b), UINT64_C(0xd2c29163e7ab5180), UINT64_C(0x93deb2bc5415e7dd), UINT64_C(0xacd553f6c57f8a16), UINT64_C(0x11e6f50333688b67), UINT64_C(0x2eed1449a202e6ac), UINT64_C(0x6ff1379611bc50f1), UINT64_C(0x50fad6dc80d63d3a), UINT64_C(0x874ed556529c4c96), UINT64_C(0xb845341cc3f6215d), UINT64_C(0xf95917c370489700), UINT64_C(0xc652f689e122facb), UINT64_C(0x7b61507c1735fbba), UINT64_C(0x446ab136865f9671), UINT64_C(0x057692e935e1202c), UINT64_C(0x3a7d73a3a48b4de7), UINT64_C(0x718c75ae7cf7bbe2), UINT64_C(0x4e8794e4ed9dd629), UINT64_C(0x0f9bb73b5e236074), UINT64_C(0x30905671cf490dbf), UINT64_C(0x8da3f084395e0cce), UINT64_C(0xb2a811cea8346105), UINT64_C(0xf3b432111b8ad758), UINT64_C(0xccbfd35b8ae0ba93), UINT64_C(0x1b0bd0d158aacb3f), UINT64_C(0x2400319bc9c0a6f4), UINT64_C(0x651c12447a7e10a9), UINT64_C(0x5a17f30eeb147d62), UINT64_C(0xe72455fb1d037c13), UINT64_C(0xd82fb4b18c6911d8), UINT64_C(0x9933976e3fd7a785), UINT64_C(0xa6387624aebdca4e), UINT64_C(0xa4833f50344d5a58), UINT64_C(0x9b88de1aa5273793), UINT64_C(0xda94fdc5169981ce), UINT64_C(0xe59f1c8f87f3ec05), UINT64_C(0x58acba7a71e4ed74), UINT64_C(0x67a75b30e08e80bf), UINT64_C(0x26bb78ef533036e2), UINT64_C(0x19b099a5c25a5b29), UINT64_C(0xce049a2f10102a85), UINT64_C(0xf10f7b65817a474e), UINT64_C(0xb01358ba32c4f113), UINT64_C(0x8f18b9f0a3ae9cd8), UINT64_C(0x322b1f0555b99da9), UINT64_C(0x0d20fe4fc4d3f062), UINT64_C(0x4c3cdd90776d463f), UINT64_C(0x73373cdae6072bf4), UINT64_C(0x494a4f79428c6613), UINT64_C(0x7641ae33d3e60bd8), UINT64_C(0x375d8dec6058bd85), UINT64_C(0x08566ca6f132d04e), UINT64_C(0xb565ca530725d13f), UINT64_C(0x8a6e2b19964fbcf4), UINT64_C(0xcb7208c625f10aa9), UINT64_C(0xf479e98cb49b6762), UINT64_C(0x23cdea0666d116ce), UINT64_C(0x1cc60b4cf7bb7b05), UINT64_C(0x5dda28934405cd58), UINT64_C(0x62d1c9d9d56fa093), UINT64_C(0xdfe26f2c2378a1e2), 
UINT64_C(0xe0e98e66b212cc29), UINT64_C(0xa1f5adb901ac7a74), UINT64_C(0x9efe4cf390c617bf), UINT64_C(0x9c4505870a3687a9), UINT64_C(0xa34ee4cd9b5cea62), UINT64_C(0xe252c71228e25c3f), UINT64_C(0xdd592658b98831f4), UINT64_C(0x606a80ad4f9f3085), UINT64_C(0x5f6161e7def55d4e), UINT64_C(0x1e7d42386d4beb13), UINT64_C(0x2176a372fc2186d8), UINT64_C(0xf6c2a0f82e6bf774), UINT64_C(0xc9c941b2bf019abf), UINT64_C(0x88d5626d0cbf2ce2), UINT64_C(0xb7de83279dd54129), UINT64_C(0x0aed25d26bc24058), UINT64_C(0x35e6c498faa82d93), UINT64_C(0x74fae74749169bce), UINT64_C(0x4bf1060dd87cf605), UINT64_C(0xe318eb5cf9ef77c4), UINT64_C(0xdc130a1668851a0f), UINT64_C(0x9d0f29c9db3bac52), UINT64_C(0xa204c8834a51c199), UINT64_C(0x1f376e76bc46c0e8), UINT64_C(0x203c8f3c2d2cad23), UINT64_C(0x6120ace39e921b7e), UINT64_C(0x5e2b4da90ff876b5), UINT64_C(0x899f4e23ddb20719), UINT64_C(0xb694af694cd86ad2), UINT64_C(0xf7888cb6ff66dc8f), UINT64_C(0xc8836dfc6e0cb144), UINT64_C(0x75b0cb09981bb035), UINT64_C(0x4abb2a430971ddfe), UINT64_C(0x0ba7099cbacf6ba3), UINT64_C(0x34ace8d62ba50668), UINT64_C(0x3617a1a2b155967e), UINT64_C(0x091c40e8203ffbb5), UINT64_C(0x4800633793814de8), UINT64_C(0x770b827d02eb2023), UINT64_C(0xca382488f4fc2152), UINT64_C(0xf533c5c265964c99), UINT64_C(0xb42fe61dd628fac4), UINT64_C(0x8b2407574742970f), UINT64_C(0x5c9004dd9508e6a3), UINT64_C(0x639be59704628b68), UINT64_C(0x2287c648b7dc3d35), UINT64_C(0x1d8c270226b650fe), UINT64_C(0xa0bf81f7d0a1518f), UINT64_C(0x9fb460bd41cb3c44), UINT64_C(0xdea84362f2758a19), UINT64_C(0xe1a3a228631fe7d2), UINT64_C(0xdbded18bc794aa35), UINT64_C(0xe4d530c156fec7fe), UINT64_C(0xa5c9131ee54071a3), UINT64_C(0x9ac2f254742a1c68), UINT64_C(0x27f154a1823d1d19), UINT64_C(0x18fab5eb135770d2), UINT64_C(0x59e69634a0e9c68f), UINT64_C(0x66ed777e3183ab44), UINT64_C(0xb15974f4e3c9dae8), UINT64_C(0x8e5295be72a3b723), UINT64_C(0xcf4eb661c11d017e), UINT64_C(0xf045572b50776cb5), UINT64_C(0x4d76f1dea6606dc4), UINT64_C(0x727d1094370a000f), UINT64_C(0x3361334b84b4b652), 
UINT64_C(0x0c6ad20115dedb99), UINT64_C(0x0ed19b758f2e4b8f), UINT64_C(0x31da7a3f1e442644), UINT64_C(0x70c659e0adfa9019), UINT64_C(0x4fcdb8aa3c90fdd2), UINT64_C(0xf2fe1e5fca87fca3), UINT64_C(0xcdf5ff155bed9168), UINT64_C(0x8ce9dccae8532735), UINT64_C(0xb3e23d8079394afe), UINT64_C(0x64563e0aab733b52), UINT64_C(0x5b5ddf403a195699), UINT64_C(0x1a41fc9f89a7e0c4), UINT64_C(0x254a1dd518cd8d0f), UINT64_C(0x9879bb20eeda8c7e), UINT64_C(0xa7725a6a7fb0e1b5), UINT64_C(0xe66e79b5cc0e57e8), UINT64_C(0xd96598ff5d643a23), UINT64_C(0x92949ef28518cc26), UINT64_C(0xad9f7fb81472a1ed), UINT64_C(0xec835c67a7cc17b0), UINT64_C(0xd388bd2d36a67a7b), UINT64_C(0x6ebb1bd8c0b17b0a), UINT64_C(0x51b0fa9251db16c1), UINT64_C(0x10acd94de265a09c), UINT64_C(0x2fa73807730fcd57), UINT64_C(0xf8133b8da145bcfb), UINT64_C(0xc718dac7302fd130), UINT64_C(0x8604f9188391676d), UINT64_C(0xb90f185212fb0aa6), UINT64_C(0x043cbea7e4ec0bd7), UINT64_C(0x3b375fed7586661c), UINT64_C(0x7a2b7c32c638d041), UINT64_C(0x45209d785752bd8a), UINT64_C(0x479bd40ccda22d9c), UINT64_C(0x789035465cc84057), UINT64_C(0x398c1699ef76f60a), UINT64_C(0x0687f7d37e1c9bc1), UINT64_C(0xbbb45126880b9ab0), UINT64_C(0x84bfb06c1961f77b), UINT64_C(0xc5a393b3aadf4126), UINT64_C(0xfaa872f93bb52ced), UINT64_C(0x2d1c7173e9ff5d41), UINT64_C(0x121790397895308a), UINT64_C(0x530bb3e6cb2b86d7), UINT64_C(0x6c0052ac5a41eb1c), UINT64_C(0xd133f459ac56ea6d), UINT64_C(0xee3815133d3c87a6), UINT64_C(0xaf2436cc8e8231fb), UINT64_C(0x902fd7861fe85c30), UINT64_C(0xaa52a425bb6311d7), UINT64_C(0x9559456f2a097c1c), UINT64_C(0xd44566b099b7ca41), UINT64_C(0xeb4e87fa08dda78a), UINT64_C(0x567d210ffecaa6fb), UINT64_C(0x6976c0456fa0cb30), UINT64_C(0x286ae39adc1e7d6d), UINT64_C(0x176102d04d7410a6), UINT64_C(0xc0d5015a9f3e610a), UINT64_C(0xffdee0100e540cc1), UINT64_C(0xbec2c3cfbdeaba9c), UINT64_C(0x81c922852c80d757), UINT64_C(0x3cfa8470da97d626), UINT64_C(0x03f1653a4bfdbbed), UINT64_C(0x42ed46e5f8430db0), UINT64_C(0x7de6a7af6929607b), UINT64_C(0x7f5deedbf3d9f06d), 
UINT64_C(0x40560f9162b39da6), UINT64_C(0x014a2c4ed10d2bfb), UINT64_C(0x3e41cd0440674630), UINT64_C(0x83726bf1b6704741), UINT64_C(0xbc798abb271a2a8a), UINT64_C(0xfd65a96494a49cd7), UINT64_C(0xc26e482e05cef11c), UINT64_C(0x15da4ba4d78480b0), UINT64_C(0x2ad1aaee46eeed7b), UINT64_C(0x6bcd8931f5505b26), UINT64_C(0x54c6687b643a36ed), UINT64_C(0xe9f5ce8e922d379c), UINT64_C(0xd6fe2fc403475a57), UINT64_C(0x97e20c1bb0f9ec0a), UINT64_C(0xa8e9ed51219381c1) }, { UINT64_C(0x0000000000000000), UINT64_C(0x54e979925cd0f10d), UINT64_C(0xa9d2f324b9a1e21a), UINT64_C(0xfd3b8ab6e5711317), UINT64_C(0xc17d4962dc4ddab1), UINT64_C(0x959430f0809d2bbc), UINT64_C(0x68afba4665ec38ab), UINT64_C(0x3c46c3d4393cc9a6), UINT64_C(0x10223dee1795abe7), UINT64_C(0x44cb447c4b455aea), UINT64_C(0xb9f0cecaae3449fd), UINT64_C(0xed19b758f2e4b8f0), UINT64_C(0xd15f748ccbd87156), UINT64_C(0x85b60d1e9708805b), UINT64_C(0x788d87a87279934c), UINT64_C(0x2c64fe3a2ea96241), UINT64_C(0x20447bdc2f2b57ce), UINT64_C(0x74ad024e73fba6c3), UINT64_C(0x899688f8968ab5d4), UINT64_C(0xdd7ff16aca5a44d9), UINT64_C(0xe13932bef3668d7f), UINT64_C(0xb5d04b2cafb67c72), UINT64_C(0x48ebc19a4ac76f65), UINT64_C(0x1c02b80816179e68), UINT64_C(0x3066463238befc29), UINT64_C(0x648f3fa0646e0d24), UINT64_C(0x99b4b516811f1e33), UINT64_C(0xcd5dcc84ddcfef3e), UINT64_C(0xf11b0f50e4f32698), UINT64_C(0xa5f276c2b823d795), UINT64_C(0x58c9fc745d52c482), UINT64_C(0x0c2085e60182358f), UINT64_C(0x4088f7b85e56af9c), UINT64_C(0x14618e2a02865e91), UINT64_C(0xe95a049ce7f74d86), UINT64_C(0xbdb37d0ebb27bc8b), UINT64_C(0x81f5beda821b752d), UINT64_C(0xd51cc748decb8420), UINT64_C(0x28274dfe3bba9737), UINT64_C(0x7cce346c676a663a), UINT64_C(0x50aaca5649c3047b), UINT64_C(0x0443b3c41513f576), UINT64_C(0xf9783972f062e661), UINT64_C(0xad9140e0acb2176c), UINT64_C(0x91d78334958edeca), UINT64_C(0xc53efaa6c95e2fc7), UINT64_C(0x380570102c2f3cd0), UINT64_C(0x6cec098270ffcddd), UINT64_C(0x60cc8c64717df852), UINT64_C(0x3425f5f62dad095f), UINT64_C(0xc91e7f40c8dc1a48), 
UINT64_C(0x9df706d2940ceb45), UINT64_C(0xa1b1c506ad3022e3), UINT64_C(0xf558bc94f1e0d3ee), UINT64_C(0x086336221491c0f9), UINT64_C(0x5c8a4fb0484131f4), UINT64_C(0x70eeb18a66e853b5), UINT64_C(0x2407c8183a38a2b8), UINT64_C(0xd93c42aedf49b1af), UINT64_C(0x8dd53b3c839940a2), UINT64_C(0xb193f8e8baa58904), UINT64_C(0xe57a817ae6757809), UINT64_C(0x18410bcc03046b1e), UINT64_C(0x4ca8725e5fd49a13), UINT64_C(0x8111ef70bcad5f38), UINT64_C(0xd5f896e2e07dae35), UINT64_C(0x28c31c54050cbd22), UINT64_C(0x7c2a65c659dc4c2f), UINT64_C(0x406ca61260e08589), UINT64_C(0x1485df803c307484), UINT64_C(0xe9be5536d9416793), UINT64_C(0xbd572ca48591969e), UINT64_C(0x9133d29eab38f4df), UINT64_C(0xc5daab0cf7e805d2), UINT64_C(0x38e121ba129916c5), UINT64_C(0x6c0858284e49e7c8), UINT64_C(0x504e9bfc77752e6e), UINT64_C(0x04a7e26e2ba5df63), UINT64_C(0xf99c68d8ced4cc74), UINT64_C(0xad75114a92043d79), UINT64_C(0xa15594ac938608f6), UINT64_C(0xf5bced3ecf56f9fb), UINT64_C(0x088767882a27eaec), UINT64_C(0x5c6e1e1a76f71be1), UINT64_C(0x6028ddce4fcbd247), UINT64_C(0x34c1a45c131b234a), UINT64_C(0xc9fa2eeaf66a305d), UINT64_C(0x9d135778aabac150), UINT64_C(0xb177a9428413a311), UINT64_C(0xe59ed0d0d8c3521c), UINT64_C(0x18a55a663db2410b), UINT64_C(0x4c4c23f46162b006), UINT64_C(0x700ae020585e79a0), UINT64_C(0x24e399b2048e88ad), UINT64_C(0xd9d81304e1ff9bba), UINT64_C(0x8d316a96bd2f6ab7), UINT64_C(0xc19918c8e2fbf0a4), UINT64_C(0x9570615abe2b01a9), UINT64_C(0x684bebec5b5a12be), UINT64_C(0x3ca2927e078ae3b3), UINT64_C(0x00e451aa3eb62a15), UINT64_C(0x540d28386266db18), UINT64_C(0xa936a28e8717c80f), UINT64_C(0xfddfdb1cdbc73902), UINT64_C(0xd1bb2526f56e5b43), UINT64_C(0x85525cb4a9beaa4e), UINT64_C(0x7869d6024ccfb959), UINT64_C(0x2c80af90101f4854), UINT64_C(0x10c66c44292381f2), UINT64_C(0x442f15d675f370ff), UINT64_C(0xb9149f60908263e8), UINT64_C(0xedfde6f2cc5292e5), UINT64_C(0xe1dd6314cdd0a76a), UINT64_C(0xb5341a8691005667), UINT64_C(0x480f903074714570), UINT64_C(0x1ce6e9a228a1b47d), UINT64_C(0x20a02a76119d7ddb), 
UINT64_C(0x744953e44d4d8cd6), UINT64_C(0x8972d952a83c9fc1), UINT64_C(0xdd9ba0c0f4ec6ecc), UINT64_C(0xf1ff5efada450c8d), UINT64_C(0xa51627688695fd80), UINT64_C(0x582dadde63e4ee97), UINT64_C(0x0cc4d44c3f341f9a), UINT64_C(0x308217980608d63c), UINT64_C(0x646b6e0a5ad82731), UINT64_C(0x9950e4bcbfa93426), UINT64_C(0xcdb99d2ee379c52b), UINT64_C(0x90fb71cad654a0f5), UINT64_C(0xc41208588a8451f8), UINT64_C(0x392982ee6ff542ef), UINT64_C(0x6dc0fb7c3325b3e2), UINT64_C(0x518638a80a197a44), UINT64_C(0x056f413a56c98b49), UINT64_C(0xf854cb8cb3b8985e), UINT64_C(0xacbdb21eef686953), UINT64_C(0x80d94c24c1c10b12), UINT64_C(0xd43035b69d11fa1f), UINT64_C(0x290bbf007860e908), UINT64_C(0x7de2c69224b01805), UINT64_C(0x41a405461d8cd1a3), UINT64_C(0x154d7cd4415c20ae), UINT64_C(0xe876f662a42d33b9), UINT64_C(0xbc9f8ff0f8fdc2b4), UINT64_C(0xb0bf0a16f97ff73b), UINT64_C(0xe4567384a5af0636), UINT64_C(0x196df93240de1521), UINT64_C(0x4d8480a01c0ee42c), UINT64_C(0x71c2437425322d8a), UINT64_C(0x252b3ae679e2dc87), UINT64_C(0xd810b0509c93cf90), UINT64_C(0x8cf9c9c2c0433e9d), UINT64_C(0xa09d37f8eeea5cdc), UINT64_C(0xf4744e6ab23aadd1), UINT64_C(0x094fc4dc574bbec6), UINT64_C(0x5da6bd4e0b9b4fcb), UINT64_C(0x61e07e9a32a7866d), UINT64_C(0x350907086e777760), UINT64_C(0xc8328dbe8b066477), UINT64_C(0x9cdbf42cd7d6957a), UINT64_C(0xd073867288020f69), UINT64_C(0x849affe0d4d2fe64), UINT64_C(0x79a1755631a3ed73), UINT64_C(0x2d480cc46d731c7e), UINT64_C(0x110ecf10544fd5d8), UINT64_C(0x45e7b682089f24d5), UINT64_C(0xb8dc3c34edee37c2), UINT64_C(0xec3545a6b13ec6cf), UINT64_C(0xc051bb9c9f97a48e), UINT64_C(0x94b8c20ec3475583), UINT64_C(0x698348b826364694), UINT64_C(0x3d6a312a7ae6b799), UINT64_C(0x012cf2fe43da7e3f), UINT64_C(0x55c58b6c1f0a8f32), UINT64_C(0xa8fe01dafa7b9c25), UINT64_C(0xfc177848a6ab6d28), UINT64_C(0xf037fdaea72958a7), UINT64_C(0xa4de843cfbf9a9aa), UINT64_C(0x59e50e8a1e88babd), UINT64_C(0x0d0c771842584bb0), UINT64_C(0x314ab4cc7b648216), UINT64_C(0x65a3cd5e27b4731b), UINT64_C(0x989847e8c2c5600c), 
UINT64_C(0xcc713e7a9e159101), UINT64_C(0xe015c040b0bcf340), UINT64_C(0xb4fcb9d2ec6c024d), UINT64_C(0x49c73364091d115a), UINT64_C(0x1d2e4af655cde057), UINT64_C(0x216889226cf129f1), UINT64_C(0x7581f0b03021d8fc), UINT64_C(0x88ba7a06d550cbeb), UINT64_C(0xdc53039489803ae6), UINT64_C(0x11ea9eba6af9ffcd), UINT64_C(0x4503e72836290ec0), UINT64_C(0xb8386d9ed3581dd7), UINT64_C(0xecd1140c8f88ecda), UINT64_C(0xd097d7d8b6b4257c), UINT64_C(0x847eae4aea64d471), UINT64_C(0x794524fc0f15c766), UINT64_C(0x2dac5d6e53c5366b), UINT64_C(0x01c8a3547d6c542a), UINT64_C(0x5521dac621bca527), UINT64_C(0xa81a5070c4cdb630), UINT64_C(0xfcf329e2981d473d), UINT64_C(0xc0b5ea36a1218e9b), UINT64_C(0x945c93a4fdf17f96), UINT64_C(0x6967191218806c81), UINT64_C(0x3d8e608044509d8c), UINT64_C(0x31aee56645d2a803), UINT64_C(0x65479cf41902590e), UINT64_C(0x987c1642fc734a19), UINT64_C(0xcc956fd0a0a3bb14), UINT64_C(0xf0d3ac04999f72b2), UINT64_C(0xa43ad596c54f83bf), UINT64_C(0x59015f20203e90a8), UINT64_C(0x0de826b27cee61a5), UINT64_C(0x218cd888524703e4), UINT64_C(0x7565a11a0e97f2e9), UINT64_C(0x885e2bacebe6e1fe), UINT64_C(0xdcb7523eb73610f3), UINT64_C(0xe0f191ea8e0ad955), UINT64_C(0xb418e878d2da2858), UINT64_C(0x492362ce37ab3b4f), UINT64_C(0x1dca1b5c6b7bca42), UINT64_C(0x5162690234af5051), UINT64_C(0x058b1090687fa15c), UINT64_C(0xf8b09a268d0eb24b), UINT64_C(0xac59e3b4d1de4346), UINT64_C(0x901f2060e8e28ae0), UINT64_C(0xc4f659f2b4327bed), UINT64_C(0x39cdd344514368fa), UINT64_C(0x6d24aad60d9399f7), UINT64_C(0x414054ec233afbb6), UINT64_C(0x15a92d7e7fea0abb), UINT64_C(0xe892a7c89a9b19ac), UINT64_C(0xbc7bde5ac64be8a1), UINT64_C(0x803d1d8eff772107), UINT64_C(0xd4d4641ca3a7d00a), UINT64_C(0x29efeeaa46d6c31d), UINT64_C(0x7d0697381a063210), UINT64_C(0x712612de1b84079f), UINT64_C(0x25cf6b4c4754f692), UINT64_C(0xd8f4e1faa225e585), UINT64_C(0x8c1d9868fef51488), UINT64_C(0xb05b5bbcc7c9dd2e), UINT64_C(0xe4b2222e9b192c23), UINT64_C(0x1989a8987e683f34), UINT64_C(0x4d60d10a22b8ce39), UINT64_C(0x61042f300c11ac78), 
UINT64_C(0x35ed56a250c15d75), UINT64_C(0xc8d6dc14b5b04e62), UINT64_C(0x9c3fa586e960bf6f), UINT64_C(0xa0796652d05c76c9), UINT64_C(0xf4901fc08c8c87c4), UINT64_C(0x09ab957669fd94d3), UINT64_C(0x5d42ece4352d65de) }, { UINT64_C(0x0000000000000000), UINT64_C(0xb32e4cbe03a75f6f), UINT64_C(0xf4843657a840a05b), UINT64_C(0x47aa7ae9abe7ff34), UINT64_C(0x7bd0c384ff8f5e33), UINT64_C(0xc8fe8f3afc28015c), UINT64_C(0x8f54f5d357cffe68), UINT64_C(0x3c7ab96d5468a107), UINT64_C(0xf7a18709ff1ebc66), UINT64_C(0x448fcbb7fcb9e309), UINT64_C(0x0325b15e575e1c3d), UINT64_C(0xb00bfde054f94352), UINT64_C(0x8c71448d0091e255), UINT64_C(0x3f5f08330336bd3a), UINT64_C(0x78f572daa8d1420e), UINT64_C(0xcbdb3e64ab761d61), UINT64_C(0x7d9ba13851336649), UINT64_C(0xceb5ed8652943926), UINT64_C(0x891f976ff973c612), UINT64_C(0x3a31dbd1fad4997d), UINT64_C(0x064b62bcaebc387a), UINT64_C(0xb5652e02ad1b6715), UINT64_C(0xf2cf54eb06fc9821), UINT64_C(0x41e11855055bc74e), UINT64_C(0x8a3a2631ae2dda2f), UINT64_C(0x39146a8fad8a8540), UINT64_C(0x7ebe1066066d7a74), UINT64_C(0xcd905cd805ca251b), UINT64_C(0xf1eae5b551a2841c), UINT64_C(0x42c4a90b5205db73), UINT64_C(0x056ed3e2f9e22447), UINT64_C(0xb6409f5cfa457b28), UINT64_C(0xfb374270a266cc92), UINT64_C(0x48190ecea1c193fd), UINT64_C(0x0fb374270a266cc9), UINT64_C(0xbc9d3899098133a6), UINT64_C(0x80e781f45de992a1), UINT64_C(0x33c9cd4a5e4ecdce), UINT64_C(0x7463b7a3f5a932fa), UINT64_C(0xc74dfb1df60e6d95), UINT64_C(0x0c96c5795d7870f4), UINT64_C(0xbfb889c75edf2f9b), UINT64_C(0xf812f32ef538d0af), UINT64_C(0x4b3cbf90f69f8fc0), UINT64_C(0x774606fda2f72ec7), UINT64_C(0xc4684a43a15071a8), UINT64_C(0x83c230aa0ab78e9c), UINT64_C(0x30ec7c140910d1f3), UINT64_C(0x86ace348f355aadb), UINT64_C(0x3582aff6f0f2f5b4), UINT64_C(0x7228d51f5b150a80), UINT64_C(0xc10699a158b255ef), UINT64_C(0xfd7c20cc0cdaf4e8), UINT64_C(0x4e526c720f7dab87), UINT64_C(0x09f8169ba49a54b3), UINT64_C(0xbad65a25a73d0bdc), UINT64_C(0x710d64410c4b16bd), UINT64_C(0xc22328ff0fec49d2), UINT64_C(0x85895216a40bb6e6), 
UINT64_C(0x36a71ea8a7ace989), UINT64_C(0x0adda7c5f3c4488e), UINT64_C(0xb9f3eb7bf06317e1), UINT64_C(0xfe5991925b84e8d5), UINT64_C(0x4d77dd2c5823b7ba), UINT64_C(0x64b62bcaebc387a1), UINT64_C(0xd7986774e864d8ce), UINT64_C(0x90321d9d438327fa), UINT64_C(0x231c512340247895), UINT64_C(0x1f66e84e144cd992), UINT64_C(0xac48a4f017eb86fd), UINT64_C(0xebe2de19bc0c79c9), UINT64_C(0x58cc92a7bfab26a6), UINT64_C(0x9317acc314dd3bc7), UINT64_C(0x2039e07d177a64a8), UINT64_C(0x67939a94bc9d9b9c), UINT64_C(0xd4bdd62abf3ac4f3), UINT64_C(0xe8c76f47eb5265f4), UINT64_C(0x5be923f9e8f53a9b), UINT64_C(0x1c4359104312c5af), UINT64_C(0xaf6d15ae40b59ac0), UINT64_C(0x192d8af2baf0e1e8), UINT64_C(0xaa03c64cb957be87), UINT64_C(0xeda9bca512b041b3), UINT64_C(0x5e87f01b11171edc), UINT64_C(0x62fd4976457fbfdb), UINT64_C(0xd1d305c846d8e0b4), UINT64_C(0x96797f21ed3f1f80), UINT64_C(0x2557339fee9840ef), UINT64_C(0xee8c0dfb45ee5d8e), UINT64_C(0x5da24145464902e1), UINT64_C(0x1a083bacedaefdd5), UINT64_C(0xa9267712ee09a2ba), UINT64_C(0x955cce7fba6103bd), UINT64_C(0x267282c1b9c65cd2), UINT64_C(0x61d8f8281221a3e6), UINT64_C(0xd2f6b4961186fc89), UINT64_C(0x9f8169ba49a54b33), UINT64_C(0x2caf25044a02145c), UINT64_C(0x6b055fede1e5eb68), UINT64_C(0xd82b1353e242b407), UINT64_C(0xe451aa3eb62a1500), UINT64_C(0x577fe680b58d4a6f), UINT64_C(0x10d59c691e6ab55b), UINT64_C(0xa3fbd0d71dcdea34), UINT64_C(0x6820eeb3b6bbf755), UINT64_C(0xdb0ea20db51ca83a), UINT64_C(0x9ca4d8e41efb570e), UINT64_C(0x2f8a945a1d5c0861), UINT64_C(0x13f02d374934a966), UINT64_C(0xa0de61894a93f609), UINT64_C(0xe7741b60e174093d), UINT64_C(0x545a57dee2d35652), UINT64_C(0xe21ac88218962d7a), UINT64_C(0x5134843c1b317215), UINT64_C(0x169efed5b0d68d21), UINT64_C(0xa5b0b26bb371d24e), UINT64_C(0x99ca0b06e7197349), UINT64_C(0x2ae447b8e4be2c26), UINT64_C(0x6d4e3d514f59d312), UINT64_C(0xde6071ef4cfe8c7d), UINT64_C(0x15bb4f8be788911c), UINT64_C(0xa6950335e42fce73), UINT64_C(0xe13f79dc4fc83147), UINT64_C(0x521135624c6f6e28), UINT64_C(0x6e6b8c0f1807cf2f), 
UINT64_C(0xdd45c0b11ba09040), UINT64_C(0x9aefba58b0476f74), UINT64_C(0x29c1f6e6b3e0301b), UINT64_C(0xc96c5795d7870f42), UINT64_C(0x7a421b2bd420502d), UINT64_C(0x3de861c27fc7af19), UINT64_C(0x8ec62d7c7c60f076), UINT64_C(0xb2bc941128085171), UINT64_C(0x0192d8af2baf0e1e), UINT64_C(0x4638a2468048f12a), UINT64_C(0xf516eef883efae45), UINT64_C(0x3ecdd09c2899b324), UINT64_C(0x8de39c222b3eec4b), UINT64_C(0xca49e6cb80d9137f), UINT64_C(0x7967aa75837e4c10), UINT64_C(0x451d1318d716ed17), UINT64_C(0xf6335fa6d4b1b278), UINT64_C(0xb199254f7f564d4c), UINT64_C(0x02b769f17cf11223), UINT64_C(0xb4f7f6ad86b4690b), UINT64_C(0x07d9ba1385133664), UINT64_C(0x4073c0fa2ef4c950), UINT64_C(0xf35d8c442d53963f), UINT64_C(0xcf273529793b3738), UINT64_C(0x7c0979977a9c6857), UINT64_C(0x3ba3037ed17b9763), UINT64_C(0x888d4fc0d2dcc80c), UINT64_C(0x435671a479aad56d), UINT64_C(0xf0783d1a7a0d8a02), UINT64_C(0xb7d247f3d1ea7536), UINT64_C(0x04fc0b4dd24d2a59), UINT64_C(0x3886b22086258b5e), UINT64_C(0x8ba8fe9e8582d431), UINT64_C(0xcc0284772e652b05), UINT64_C(0x7f2cc8c92dc2746a), UINT64_C(0x325b15e575e1c3d0), UINT64_C(0x8175595b76469cbf), UINT64_C(0xc6df23b2dda1638b), UINT64_C(0x75f16f0cde063ce4), UINT64_C(0x498bd6618a6e9de3), UINT64_C(0xfaa59adf89c9c28c), UINT64_C(0xbd0fe036222e3db8), UINT64_C(0x0e21ac88218962d7), UINT64_C(0xc5fa92ec8aff7fb6), UINT64_C(0x76d4de52895820d9), UINT64_C(0x317ea4bb22bfdfed), UINT64_C(0x8250e80521188082), UINT64_C(0xbe2a516875702185), UINT64_C(0x0d041dd676d77eea), UINT64_C(0x4aae673fdd3081de), UINT64_C(0xf9802b81de97deb1), UINT64_C(0x4fc0b4dd24d2a599), UINT64_C(0xfceef8632775faf6), UINT64_C(0xbb44828a8c9205c2), UINT64_C(0x086ace348f355aad), UINT64_C(0x34107759db5dfbaa), UINT64_C(0x873e3be7d8faa4c5), UINT64_C(0xc094410e731d5bf1), UINT64_C(0x73ba0db070ba049e), UINT64_C(0xb86133d4dbcc19ff), UINT64_C(0x0b4f7f6ad86b4690), UINT64_C(0x4ce50583738cb9a4), UINT64_C(0xffcb493d702be6cb), UINT64_C(0xc3b1f050244347cc), UINT64_C(0x709fbcee27e418a3), UINT64_C(0x3735c6078c03e797), 
UINT64_C(0x841b8ab98fa4b8f8), UINT64_C(0xadda7c5f3c4488e3), UINT64_C(0x1ef430e13fe3d78c), UINT64_C(0x595e4a08940428b8), UINT64_C(0xea7006b697a377d7), UINT64_C(0xd60abfdbc3cbd6d0), UINT64_C(0x6524f365c06c89bf), UINT64_C(0x228e898c6b8b768b), UINT64_C(0x91a0c532682c29e4), UINT64_C(0x5a7bfb56c35a3485), UINT64_C(0xe955b7e8c0fd6bea), UINT64_C(0xaeffcd016b1a94de), UINT64_C(0x1dd181bf68bdcbb1), UINT64_C(0x21ab38d23cd56ab6), UINT64_C(0x9285746c3f7235d9), UINT64_C(0xd52f0e859495caed), UINT64_C(0x6601423b97329582), UINT64_C(0xd041dd676d77eeaa), UINT64_C(0x636f91d96ed0b1c5), UINT64_C(0x24c5eb30c5374ef1), UINT64_C(0x97eba78ec690119e), UINT64_C(0xab911ee392f8b099), UINT64_C(0x18bf525d915feff6), UINT64_C(0x5f1528b43ab810c2), UINT64_C(0xec3b640a391f4fad), UINT64_C(0x27e05a6e926952cc), UINT64_C(0x94ce16d091ce0da3), UINT64_C(0xd3646c393a29f297), UINT64_C(0x604a2087398eadf8), UINT64_C(0x5c3099ea6de60cff), UINT64_C(0xef1ed5546e415390), UINT64_C(0xa8b4afbdc5a6aca4), UINT64_C(0x1b9ae303c601f3cb), UINT64_C(0x56ed3e2f9e224471), UINT64_C(0xe5c372919d851b1e), UINT64_C(0xa26908783662e42a), UINT64_C(0x114744c635c5bb45), UINT64_C(0x2d3dfdab61ad1a42), UINT64_C(0x9e13b115620a452d), UINT64_C(0xd9b9cbfcc9edba19), UINT64_C(0x6a978742ca4ae576), UINT64_C(0xa14cb926613cf817), UINT64_C(0x1262f598629ba778), UINT64_C(0x55c88f71c97c584c), UINT64_C(0xe6e6c3cfcadb0723), UINT64_C(0xda9c7aa29eb3a624), UINT64_C(0x69b2361c9d14f94b), UINT64_C(0x2e184cf536f3067f), UINT64_C(0x9d36004b35545910), UINT64_C(0x2b769f17cf112238), UINT64_C(0x9858d3a9ccb67d57), UINT64_C(0xdff2a94067518263), UINT64_C(0x6cdce5fe64f6dd0c), UINT64_C(0x50a65c93309e7c0b), UINT64_C(0xe388102d33392364), UINT64_C(0xa4226ac498dedc50), UINT64_C(0x170c267a9b79833f), UINT64_C(0xdcd7181e300f9e5e), UINT64_C(0x6ff954a033a8c131), UINT64_C(0x28532e49984f3e05), UINT64_C(0x9b7d62f79be8616a), UINT64_C(0xa707db9acf80c06d), UINT64_C(0x14299724cc279f02), UINT64_C(0x5383edcd67c06036), UINT64_C(0xe0ada17364673f59) } }; static const uint64_t 
crc64_interleaved_table[4][256] = { { UINT64_C(0x0000000000000000), UINT64_C(0xe88a0d0c5521de3d), UINT64_C(0x43ccb533054da2ff), UINT64_C(0xab46b83f506c7cc2), UINT64_C(0x87996a660a9b45fe), UINT64_C(0x6f13676a5fba9bc3), UINT64_C(0xc455df550fd6e701), UINT64_C(0x2cdfd2595af7393c), UINT64_C(0x9dea7be7ba389579), UINT64_C(0x756076ebef194b44), UINT64_C(0xde26ced4bf753786), UINT64_C(0x36acc3d8ea54e9bb), UINT64_C(0x1a731181b0a3d087), UINT64_C(0xf2f91c8de5820eba), UINT64_C(0x59bfa4b2b5ee7278), UINT64_C(0xb135a9bee0cfac45), UINT64_C(0xa90c58e4db7f3477), UINT64_C(0x418655e88e5eea4a), UINT64_C(0xeac0edd7de329688), UINT64_C(0x024ae0db8b1348b5), UINT64_C(0x2e953282d1e47189), UINT64_C(0xc61f3f8e84c5afb4), UINT64_C(0x6d5987b1d4a9d376), UINT64_C(0x85d38abd81880d4b), UINT64_C(0x34e623036147a10e), UINT64_C(0xdc6c2e0f34667f33), UINT64_C(0x772a9630640a03f1), UINT64_C(0x9fa09b3c312bddcc), UINT64_C(0xb37f49656bdce4f0), UINT64_C(0x5bf544693efd3acd), UINT64_C(0xf0b3fc566e91460f), UINT64_C(0x1839f15a3bb09832), UINT64_C(0xc0c01ee219f0766b), UINT64_C(0x284a13ee4cd1a856), UINT64_C(0x830cabd11cbdd494), UINT64_C(0x6b86a6dd499c0aa9), UINT64_C(0x47597484136b3395), UINT64_C(0xafd37988464aeda8), UINT64_C(0x0495c1b71626916a), UINT64_C(0xec1fccbb43074f57), UINT64_C(0x5d2a6505a3c8e312), UINT64_C(0xb5a06809f6e93d2f), UINT64_C(0x1ee6d036a68541ed), UINT64_C(0xf66cdd3af3a49fd0), UINT64_C(0xdab30f63a953a6ec), UINT64_C(0x3239026ffc7278d1), UINT64_C(0x997fba50ac1e0413), UINT64_C(0x71f5b75cf93fda2e), UINT64_C(0x69cc4606c28f421c), UINT64_C(0x81464b0a97ae9c21), UINT64_C(0x2a00f335c7c2e0e3), UINT64_C(0xc28afe3992e33ede), UINT64_C(0xee552c60c81407e2), UINT64_C(0x06df216c9d35d9df), UINT64_C(0xad999953cd59a51d), UINT64_C(0x4513945f98787b20), UINT64_C(0xf4263de178b7d765), UINT64_C(0x1cac30ed2d960958), UINT64_C(0xb7ea88d27dfa759a), UINT64_C(0x5f6085de28dbaba7), UINT64_C(0x73bf5787722c929b), UINT64_C(0x9b355a8b270d4ca6), UINT64_C(0x3073e2b477613064), UINT64_C(0xd8f9efb82240ee59), UINT64_C(0x135892ef9ceef253), 
UINT64_C(0xfbd29fe3c9cf2c6e), UINT64_C(0x509427dc99a350ac), UINT64_C(0xb81e2ad0cc828e91), UINT64_C(0x94c1f8899675b7ad), UINT64_C(0x7c4bf585c3546990), UINT64_C(0xd70d4dba93381552), UINT64_C(0x3f8740b6c619cb6f), UINT64_C(0x8eb2e90826d6672a), UINT64_C(0x6638e40473f7b917), UINT64_C(0xcd7e5c3b239bc5d5), UINT64_C(0x25f4513776ba1be8), UINT64_C(0x092b836e2c4d22d4), UINT64_C(0xe1a18e62796cfce9), UINT64_C(0x4ae7365d2900802b), UINT64_C(0xa26d3b517c215e16), UINT64_C(0xba54ca0b4791c624), UINT64_C(0x52dec70712b01819), UINT64_C(0xf9987f3842dc64db), UINT64_C(0x1112723417fdbae6), UINT64_C(0x3dcda06d4d0a83da), UINT64_C(0xd547ad61182b5de7), UINT64_C(0x7e01155e48472125), UINT64_C(0x968b18521d66ff18), UINT64_C(0x27beb1ecfda9535d), UINT64_C(0xcf34bce0a8888d60), UINT64_C(0x647204dff8e4f1a2), UINT64_C(0x8cf809d3adc52f9f), UINT64_C(0xa027db8af73216a3), UINT64_C(0x48add686a213c89e), UINT64_C(0xe3eb6eb9f27fb45c), UINT64_C(0x0b6163b5a75e6a61), UINT64_C(0xd3988c0d851e8438), UINT64_C(0x3b128101d03f5a05), UINT64_C(0x9054393e805326c7), UINT64_C(0x78de3432d572f8fa), UINT64_C(0x5401e66b8f85c1c6), UINT64_C(0xbc8beb67daa41ffb), UINT64_C(0x17cd53588ac86339), UINT64_C(0xff475e54dfe9bd04), UINT64_C(0x4e72f7ea3f261141), UINT64_C(0xa6f8fae66a07cf7c), UINT64_C(0x0dbe42d93a6bb3be), UINT64_C(0xe5344fd56f4a6d83), UINT64_C(0xc9eb9d8c35bd54bf), UINT64_C(0x21619080609c8a82), UINT64_C(0x8a2728bf30f0f640), UINT64_C(0x62ad25b365d1287d), UINT64_C(0x7a94d4e95e61b04f), UINT64_C(0x921ed9e50b406e72), UINT64_C(0x395861da5b2c12b0), UINT64_C(0xd1d26cd60e0dcc8d), UINT64_C(0xfd0dbe8f54faf5b1), UINT64_C(0x1587b38301db2b8c), UINT64_C(0xbec10bbc51b7574e), UINT64_C(0x564b06b004968973), UINT64_C(0xe77eaf0ee4592536), UINT64_C(0x0ff4a202b178fb0b), UINT64_C(0xa4b21a3de11487c9), UINT64_C(0x4c381731b43559f4), UINT64_C(0x60e7c568eec260c8), UINT64_C(0x886dc864bbe3bef5), UINT64_C(0x232b705beb8fc237), UINT64_C(0xcba17d57beae1c0a), UINT64_C(0x26b125df39dde4a6), UINT64_C(0xce3b28d36cfc3a9b), UINT64_C(0x657d90ec3c904659), 
UINT64_C(0x8df79de069b19864), UINT64_C(0xa1284fb93346a158), UINT64_C(0x49a242b566677f65), UINT64_C(0xe2e4fa8a360b03a7), UINT64_C(0x0a6ef786632add9a), UINT64_C(0xbb5b5e3883e571df), UINT64_C(0x53d15334d6c4afe2), UINT64_C(0xf897eb0b86a8d320), UINT64_C(0x101de607d3890d1d), UINT64_C(0x3cc2345e897e3421), UINT64_C(0xd4483952dc5fea1c), UINT64_C(0x7f0e816d8c3396de), UINT64_C(0x97848c61d91248e3), UINT64_C(0x8fbd7d3be2a2d0d1), UINT64_C(0x67377037b7830eec), UINT64_C(0xcc71c808e7ef722e), UINT64_C(0x24fbc504b2ceac13), UINT64_C(0x0824175de839952f), UINT64_C(0xe0ae1a51bd184b12), UINT64_C(0x4be8a26eed7437d0), UINT64_C(0xa362af62b855e9ed), UINT64_C(0x125706dc589a45a8), UINT64_C(0xfadd0bd00dbb9b95), UINT64_C(0x519bb3ef5dd7e757), UINT64_C(0xb911bee308f6396a), UINT64_C(0x95ce6cba52010056), UINT64_C(0x7d4461b60720de6b), UINT64_C(0xd602d989574ca2a9), UINT64_C(0x3e88d485026d7c94), UINT64_C(0xe6713b3d202d92cd), UINT64_C(0x0efb3631750c4cf0), UINT64_C(0xa5bd8e0e25603032), UINT64_C(0x4d3783027041ee0f), UINT64_C(0x61e8515b2ab6d733), UINT64_C(0x89625c577f97090e), UINT64_C(0x2224e4682ffb75cc), UINT64_C(0xcaaee9647adaabf1), UINT64_C(0x7b9b40da9a1507b4), UINT64_C(0x93114dd6cf34d989), UINT64_C(0x3857f5e99f58a54b), UINT64_C(0xd0ddf8e5ca797b76), UINT64_C(0xfc022abc908e424a), UINT64_C(0x148827b0c5af9c77), UINT64_C(0xbfce9f8f95c3e0b5), UINT64_C(0x57449283c0e23e88), UINT64_C(0x4f7d63d9fb52a6ba), UINT64_C(0xa7f76ed5ae737887), UINT64_C(0x0cb1d6eafe1f0445), UINT64_C(0xe43bdbe6ab3eda78), UINT64_C(0xc8e409bff1c9e344), UINT64_C(0x206e04b3a4e83d79), UINT64_C(0x8b28bc8cf48441bb), UINT64_C(0x63a2b180a1a59f86), UINT64_C(0xd297183e416a33c3), UINT64_C(0x3a1d1532144bedfe), UINT64_C(0x915bad0d4427913c), UINT64_C(0x79d1a00111064f01), UINT64_C(0x550e72584bf1763d), UINT64_C(0xbd847f541ed0a800), UINT64_C(0x16c2c76b4ebcd4c2), UINT64_C(0xfe48ca671b9d0aff), UINT64_C(0x35e9b730a53316f5), UINT64_C(0xdd63ba3cf012c8c8), UINT64_C(0x76250203a07eb40a), UINT64_C(0x9eaf0f0ff55f6a37), UINT64_C(0xb270dd56afa8530b), 
UINT64_C(0x5afad05afa898d36), UINT64_C(0xf1bc6865aae5f1f4), UINT64_C(0x19366569ffc42fc9), UINT64_C(0xa803ccd71f0b838c), UINT64_C(0x4089c1db4a2a5db1), UINT64_C(0xebcf79e41a462173), UINT64_C(0x034574e84f67ff4e), UINT64_C(0x2f9aa6b11590c672), UINT64_C(0xc710abbd40b1184f), UINT64_C(0x6c56138210dd648d), UINT64_C(0x84dc1e8e45fcbab0), UINT64_C(0x9ce5efd47e4c2282), UINT64_C(0x746fe2d82b6dfcbf), UINT64_C(0xdf295ae77b01807d), UINT64_C(0x37a357eb2e205e40), UINT64_C(0x1b7c85b274d7677c), UINT64_C(0xf3f688be21f6b941), UINT64_C(0x58b03081719ac583), UINT64_C(0xb03a3d8d24bb1bbe), UINT64_C(0x010f9433c474b7fb), UINT64_C(0xe985993f915569c6), UINT64_C(0x42c32100c1391504), UINT64_C(0xaa492c0c9418cb39), UINT64_C(0x8696fe55ceeff205), UINT64_C(0x6e1cf3599bce2c38), UINT64_C(0xc55a4b66cba250fa), UINT64_C(0x2dd0466a9e838ec7), UINT64_C(0xf529a9d2bcc3609e), UINT64_C(0x1da3a4dee9e2bea3), UINT64_C(0xb6e51ce1b98ec261), UINT64_C(0x5e6f11edecaf1c5c), UINT64_C(0x72b0c3b4b6582560), UINT64_C(0x9a3aceb8e379fb5d), UINT64_C(0x317c7687b315879f), UINT64_C(0xd9f67b8be63459a2), UINT64_C(0x68c3d23506fbf5e7), UINT64_C(0x8049df3953da2bda), UINT64_C(0x2b0f670603b65718), UINT64_C(0xc3856a0a56978925), UINT64_C(0xef5ab8530c60b019), UINT64_C(0x07d0b55f59416e24), UINT64_C(0xac960d60092d12e6), UINT64_C(0x441c006c5c0cccdb), UINT64_C(0x5c25f13667bc54e9), UINT64_C(0xb4affc3a329d8ad4), UINT64_C(0x1fe9440562f1f616), UINT64_C(0xf763490937d0282b), UINT64_C(0xdbbc9b506d271117), UINT64_C(0x3336965c3806cf2a), UINT64_C(0x98702e63686ab3e8), UINT64_C(0x70fa236f3d4b6dd5), UINT64_C(0xc1cf8ad1dd84c190), UINT64_C(0x294587dd88a51fad), UINT64_C(0x82033fe2d8c9636f), UINT64_C(0x6a8932ee8de8bd52), UINT64_C(0x4656e0b7d71f846e), UINT64_C(0xaedcedbb823e5a53), UINT64_C(0x059a5584d2522691), UINT64_C(0xed1058888773f8ac) }, { UINT64_C(0x0000000000000000), UINT64_C(0x4d624bbe73bbc94c), UINT64_C(0x9ac4977ce7779298), UINT64_C(0xd7a6dcc294cc5bd4), UINT64_C(0xa75181d261e13bb5), UINT64_C(0xea33ca6c125af2f9), UINT64_C(0x3d9516ae8696a92d), 
UINT64_C(0x70f75d10f52d6061), UINT64_C(0xdc7bac8f6ccc69ef), UINT64_C(0x9119e7311f77a0a3), UINT64_C(0x46bf3bf38bbbfb77), UINT64_C(0x0bdd704df800323b), UINT64_C(0x7b2a2d5d0d2d525a), UINT64_C(0x364866e37e969b16), UINT64_C(0xe1eeba21ea5ac0c2), UINT64_C(0xac8cf19f99e1098e), UINT64_C(0x2a2ff6357696cd5b), UINT64_C(0x674dbd8b052d0417), UINT64_C(0xb0eb614991e15fc3), UINT64_C(0xfd892af7e25a968f), UINT64_C(0x8d7e77e71777f6ee), UINT64_C(0xc01c3c5964cc3fa2), UINT64_C(0x17bae09bf0006476), UINT64_C(0x5ad8ab2583bbad3a), UINT64_C(0xf6545aba1a5aa4b4), UINT64_C(0xbb36110469e16df8), UINT64_C(0x6c90cdc6fd2d362c), UINT64_C(0x21f286788e96ff60), UINT64_C(0x5105db687bbb9f01), UINT64_C(0x1c6790d60800564d), UINT64_C(0xcbc14c149ccc0d99), UINT64_C(0x86a307aaef77c4d5), UINT64_C(0x545fec6aed2d9ab6), UINT64_C(0x193da7d49e9653fa), UINT64_C(0xce9b7b160a5a082e), UINT64_C(0x83f930a879e1c162), UINT64_C(0xf30e6db88ccca103), UINT64_C(0xbe6c2606ff77684f), UINT64_C(0x69cafac46bbb339b), UINT64_C(0x24a8b17a1800fad7), UINT64_C(0x882440e581e1f359), UINT64_C(0xc5460b5bf25a3a15), UINT64_C(0x12e0d799669661c1), UINT64_C(0x5f829c27152da88d), UINT64_C(0x2f75c137e000c8ec), UINT64_C(0x62178a8993bb01a0), UINT64_C(0xb5b1564b07775a74), UINT64_C(0xf8d31df574cc9338), UINT64_C(0x7e701a5f9bbb57ed), UINT64_C(0x331251e1e8009ea1), UINT64_C(0xe4b48d237cccc575), UINT64_C(0xa9d6c69d0f770c39), UINT64_C(0xd9219b8dfa5a6c58), UINT64_C(0x9443d03389e1a514), UINT64_C(0x43e50cf11d2dfec0), UINT64_C(0x0e87474f6e96378c), UINT64_C(0xa20bb6d0f7773e02), UINT64_C(0xef69fd6e84ccf74e), UINT64_C(0x38cf21ac1000ac9a), UINT64_C(0x75ad6a1263bb65d6), UINT64_C(0x055a3702969605b7), UINT64_C(0x48387cbce52dccfb), UINT64_C(0x9f9ea07e71e1972f), UINT64_C(0xd2fcebc0025a5e63), UINT64_C(0xa8bfd8d5da5b356c), UINT64_C(0xe5dd936ba9e0fc20), UINT64_C(0x327b4fa93d2ca7f4), UINT64_C(0x7f1904174e976eb8), UINT64_C(0x0fee5907bbba0ed9), UINT64_C(0x428c12b9c801c795), UINT64_C(0x952ace7b5ccd9c41), UINT64_C(0xd84885c52f76550d), UINT64_C(0x74c4745ab6975c83), 
UINT64_C(0x39a63fe4c52c95cf), UINT64_C(0xee00e32651e0ce1b), UINT64_C(0xa362a898225b0757), UINT64_C(0xd395f588d7766736), UINT64_C(0x9ef7be36a4cdae7a), UINT64_C(0x495162f43001f5ae), UINT64_C(0x0433294a43ba3ce2), UINT64_C(0x82902ee0accdf837), UINT64_C(0xcff2655edf76317b), UINT64_C(0x1854b99c4bba6aaf), UINT64_C(0x5536f2223801a3e3), UINT64_C(0x25c1af32cd2cc382), UINT64_C(0x68a3e48cbe970ace), UINT64_C(0xbf05384e2a5b511a), UINT64_C(0xf26773f059e09856), UINT64_C(0x5eeb826fc00191d8), UINT64_C(0x1389c9d1b3ba5894), UINT64_C(0xc42f151327760340), UINT64_C(0x894d5ead54cdca0c), UINT64_C(0xf9ba03bda1e0aa6d), UINT64_C(0xb4d84803d25b6321), UINT64_C(0x637e94c1469738f5), UINT64_C(0x2e1cdf7f352cf1b9), UINT64_C(0xfce034bf3776afda), UINT64_C(0xb1827f0144cd6696), UINT64_C(0x6624a3c3d0013d42), UINT64_C(0x2b46e87da3baf40e), UINT64_C(0x5bb1b56d5697946f), UINT64_C(0x16d3fed3252c5d23), UINT64_C(0xc1752211b1e006f7), UINT64_C(0x8c1769afc25bcfbb), UINT64_C(0x209b98305bbac635), UINT64_C(0x6df9d38e28010f79), UINT64_C(0xba5f0f4cbccd54ad), UINT64_C(0xf73d44f2cf769de1), UINT64_C(0x87ca19e23a5bfd80), UINT64_C(0xcaa8525c49e034cc), UINT64_C(0x1d0e8e9edd2c6f18), UINT64_C(0x506cc520ae97a654), UINT64_C(0xd6cfc28a41e06281), UINT64_C(0x9bad8934325babcd), UINT64_C(0x4c0b55f6a697f019), UINT64_C(0x01691e48d52c3955), UINT64_C(0x719e435820015934), UINT64_C(0x3cfc08e653ba9078), UINT64_C(0xeb5ad424c776cbac), UINT64_C(0xa6389f9ab4cd02e0), UINT64_C(0x0ab46e052d2c0b6e), UINT64_C(0x47d625bb5e97c222), UINT64_C(0x9070f979ca5b99f6), UINT64_C(0xdd12b2c7b9e050ba), UINT64_C(0xade5efd74ccd30db), UINT64_C(0xe087a4693f76f997), UINT64_C(0x372178ababbaa243), UINT64_C(0x7a433315d8016b0f), UINT64_C(0xc3a71e801bb8745d), UINT64_C(0x8ec5553e6803bd11), UINT64_C(0x596389fcfccfe6c5), UINT64_C(0x1401c2428f742f89), UINT64_C(0x64f69f527a594fe8), UINT64_C(0x2994d4ec09e286a4), UINT64_C(0xfe32082e9d2edd70), UINT64_C(0xb3504390ee95143c), UINT64_C(0x1fdcb20f77741db2), UINT64_C(0x52bef9b104cfd4fe), UINT64_C(0x8518257390038f2a), 
UINT64_C(0xc87a6ecde3b84666), UINT64_C(0xb88d33dd16952607), UINT64_C(0xf5ef7863652eef4b), UINT64_C(0x2249a4a1f1e2b49f), UINT64_C(0x6f2bef1f82597dd3), UINT64_C(0xe988e8b56d2eb906), UINT64_C(0xa4eaa30b1e95704a), UINT64_C(0x734c7fc98a592b9e), UINT64_C(0x3e2e3477f9e2e2d2), UINT64_C(0x4ed969670ccf82b3), UINT64_C(0x03bb22d97f744bff), UINT64_C(0xd41dfe1bebb8102b), UINT64_C(0x997fb5a59803d967), UINT64_C(0x35f3443a01e2d0e9), UINT64_C(0x78910f84725919a5), UINT64_C(0xaf37d346e6954271), UINT64_C(0xe25598f8952e8b3d), UINT64_C(0x92a2c5e86003eb5c), UINT64_C(0xdfc08e5613b82210), UINT64_C(0x08665294877479c4), UINT64_C(0x4504192af4cfb088), UINT64_C(0x97f8f2eaf695eeeb), UINT64_C(0xda9ab954852e27a7), UINT64_C(0x0d3c659611e27c73), UINT64_C(0x405e2e286259b53f), UINT64_C(0x30a973389774d55e), UINT64_C(0x7dcb3886e4cf1c12), UINT64_C(0xaa6de444700347c6), UINT64_C(0xe70faffa03b88e8a), UINT64_C(0x4b835e659a598704), UINT64_C(0x06e115dbe9e24e48), UINT64_C(0xd147c9197d2e159c), UINT64_C(0x9c2582a70e95dcd0), UINT64_C(0xecd2dfb7fbb8bcb1), UINT64_C(0xa1b09409880375fd), UINT64_C(0x761648cb1ccf2e29), UINT64_C(0x3b7403756f74e765), UINT64_C(0xbdd704df800323b0), UINT64_C(0xf0b54f61f3b8eafc), UINT64_C(0x271393a36774b128), UINT64_C(0x6a71d81d14cf7864), UINT64_C(0x1a86850de1e21805), UINT64_C(0x57e4ceb39259d149), UINT64_C(0x8042127106958a9d), UINT64_C(0xcd2059cf752e43d1), UINT64_C(0x61aca850eccf4a5f), UINT64_C(0x2ccee3ee9f748313), UINT64_C(0xfb683f2c0bb8d8c7), UINT64_C(0xb60a74927803118b), UINT64_C(0xc6fd29828d2e71ea), UINT64_C(0x8b9f623cfe95b8a6), UINT64_C(0x5c39befe6a59e372), UINT64_C(0x115bf54019e22a3e), UINT64_C(0x6b18c655c1e34131), UINT64_C(0x267a8debb258887d), UINT64_C(0xf1dc51292694d3a9), UINT64_C(0xbcbe1a97552f1ae5), UINT64_C(0xcc494787a0027a84), UINT64_C(0x812b0c39d3b9b3c8), UINT64_C(0x568dd0fb4775e81c), UINT64_C(0x1bef9b4534ce2150), UINT64_C(0xb7636adaad2f28de), UINT64_C(0xfa012164de94e192), UINT64_C(0x2da7fda64a58ba46), UINT64_C(0x60c5b61839e3730a), UINT64_C(0x1032eb08ccce136b), 
UINT64_C(0x5d50a0b6bf75da27), UINT64_C(0x8af67c742bb981f3), UINT64_C(0xc79437ca580248bf), UINT64_C(0x41373060b7758c6a), UINT64_C(0x0c557bdec4ce4526), UINT64_C(0xdbf3a71c50021ef2), UINT64_C(0x9691eca223b9d7be), UINT64_C(0xe666b1b2d694b7df), UINT64_C(0xab04fa0ca52f7e93), UINT64_C(0x7ca226ce31e32547), UINT64_C(0x31c06d704258ec0b), UINT64_C(0x9d4c9cefdbb9e585), UINT64_C(0xd02ed751a8022cc9), UINT64_C(0x07880b933cce771d), UINT64_C(0x4aea402d4f75be51), UINT64_C(0x3a1d1d3dba58de30), UINT64_C(0x777f5683c9e3177c), UINT64_C(0xa0d98a415d2f4ca8), UINT64_C(0xedbbc1ff2e9485e4), UINT64_C(0x3f472a3f2ccedb87), UINT64_C(0x722561815f7512cb), UINT64_C(0xa583bd43cbb9491f), UINT64_C(0xe8e1f6fdb8028053), UINT64_C(0x9816abed4d2fe032), UINT64_C(0xd574e0533e94297e), UINT64_C(0x02d23c91aa5872aa), UINT64_C(0x4fb0772fd9e3bbe6), UINT64_C(0xe33c86b04002b268), UINT64_C(0xae5ecd0e33b97b24), UINT64_C(0x79f811cca77520f0), UINT64_C(0x349a5a72d4cee9bc), UINT64_C(0x446d076221e389dd), UINT64_C(0x090f4cdc52584091), UINT64_C(0xdea9901ec6941b45), UINT64_C(0x93cbdba0b52fd209), UINT64_C(0x1568dc0a5a5816dc), UINT64_C(0x580a97b429e3df90), UINT64_C(0x8fac4b76bd2f8444), UINT64_C(0xc2ce00c8ce944d08), UINT64_C(0xb2395dd83bb92d69), UINT64_C(0xff5b16664802e425), UINT64_C(0x28fdcaa4dccebff1), UINT64_C(0x659f811aaf7576bd), UINT64_C(0xc913708536947f33), UINT64_C(0x84713b3b452fb67f), UINT64_C(0x53d7e7f9d1e3edab), UINT64_C(0x1eb5ac47a25824e7), UINT64_C(0x6e42f15757754486), UINT64_C(0x2320bae924ce8dca), UINT64_C(0xf486662bb002d61e), UINT64_C(0xb9e42d95c3b91f52) }, { UINT64_C(0x0000000000000000), UINT64_C(0x1596922b987ef63f), UINT64_C(0x2b2d245730fdec7e), UINT64_C(0x3ebbb67ca8831a41), UINT64_C(0x565a48ae61fbd8fc), UINT64_C(0x43ccda85f9852ec3), UINT64_C(0x7d776cf951063482), UINT64_C(0x68e1fed2c978c2bd), UINT64_C(0xacb4915cc3f7b1f8), UINT64_C(0xb92203775b8947c7), UINT64_C(0x8799b50bf30a5d86), UINT64_C(0x920f27206b74abb9), UINT64_C(0xfaeed9f2a20c6904), UINT64_C(0xef784bd93a729f3b), UINT64_C(0xd1c3fda592f1857a), 
UINT64_C(0xc4556f8e0a8f7345), UINT64_C(0xcbb18d9228e17d75), UINT64_C(0xde271fb9b09f8b4a), UINT64_C(0xe09ca9c5181c910b), UINT64_C(0xf50a3bee80626734), UINT64_C(0x9debc53c491aa589), UINT64_C(0x887d5717d16453b6), UINT64_C(0xb6c6e16b79e749f7), UINT64_C(0xa3507340e199bfc8), UINT64_C(0x67051cceeb16cc8d), UINT64_C(0x72938ee573683ab2), UINT64_C(0x4c283899dbeb20f3), UINT64_C(0x59beaab24395d6cc), UINT64_C(0x315f54608aed1471), UINT64_C(0x24c9c64b1293e24e), UINT64_C(0x1a727037ba10f80f), UINT64_C(0x0fe4e21c226e0e30), UINT64_C(0x05bbb40ffecce46f), UINT64_C(0x102d262466b21250), UINT64_C(0x2e969058ce310811), UINT64_C(0x3b000273564ffe2e), UINT64_C(0x53e1fca19f373c93), UINT64_C(0x46776e8a0749caac), UINT64_C(0x78ccd8f6afcad0ed), UINT64_C(0x6d5a4add37b426d2), UINT64_C(0xa90f25533d3b5597), UINT64_C(0xbc99b778a545a3a8), UINT64_C(0x822201040dc6b9e9), UINT64_C(0x97b4932f95b84fd6), UINT64_C(0xff556dfd5cc08d6b), UINT64_C(0xeac3ffd6c4be7b54), UINT64_C(0xd47849aa6c3d6115), UINT64_C(0xc1eedb81f443972a), UINT64_C(0xce0a399dd62d991a), UINT64_C(0xdb9cabb64e536f25), UINT64_C(0xe5271dcae6d07564), UINT64_C(0xf0b18fe17eae835b), UINT64_C(0x98507133b7d641e6), UINT64_C(0x8dc6e3182fa8b7d9), UINT64_C(0xb37d5564872bad98), UINT64_C(0xa6ebc74f1f555ba7), UINT64_C(0x62bea8c115da28e2), UINT64_C(0x77283aea8da4dedd), UINT64_C(0x49938c962527c49c), UINT64_C(0x5c051ebdbd5932a3), UINT64_C(0x34e4e06f7421f01e), UINT64_C(0x21727244ec5f0621), UINT64_C(0x1fc9c43844dc1c60), UINT64_C(0x0a5f5613dca2ea5f), UINT64_C(0x0b77681ffd99c8de), UINT64_C(0x1ee1fa3465e73ee1), UINT64_C(0x205a4c48cd6424a0), UINT64_C(0x35ccde63551ad29f), UINT64_C(0x5d2d20b19c621022), UINT64_C(0x48bbb29a041ce61d), UINT64_C(0x760004e6ac9ffc5c), UINT64_C(0x639696cd34e10a63), UINT64_C(0xa7c3f9433e6e7926), UINT64_C(0xb2556b68a6108f19), UINT64_C(0x8ceedd140e939558), UINT64_C(0x99784f3f96ed6367), UINT64_C(0xf199b1ed5f95a1da), UINT64_C(0xe40f23c6c7eb57e5), UINT64_C(0xdab495ba6f684da4), UINT64_C(0xcf220791f716bb9b), UINT64_C(0xc0c6e58dd578b5ab), 
UINT64_C(0xd55077a64d064394), UINT64_C(0xebebc1dae58559d5), UINT64_C(0xfe7d53f17dfbafea), UINT64_C(0x969cad23b4836d57), UINT64_C(0x830a3f082cfd9b68), UINT64_C(0xbdb18974847e8129), UINT64_C(0xa8271b5f1c007716), UINT64_C(0x6c7274d1168f0453), UINT64_C(0x79e4e6fa8ef1f26c), UINT64_C(0x475f50862672e82d), UINT64_C(0x52c9c2adbe0c1e12), UINT64_C(0x3a283c7f7774dcaf), UINT64_C(0x2fbeae54ef0a2a90), UINT64_C(0x11051828478930d1), UINT64_C(0x04938a03dff7c6ee), UINT64_C(0x0eccdc1003552cb1), UINT64_C(0x1b5a4e3b9b2bda8e), UINT64_C(0x25e1f84733a8c0cf), UINT64_C(0x30776a6cabd636f0), UINT64_C(0x589694be62aef44d), UINT64_C(0x4d000695fad00272), UINT64_C(0x73bbb0e952531833), UINT64_C(0x662d22c2ca2dee0c), UINT64_C(0xa2784d4cc0a29d49), UINT64_C(0xb7eedf6758dc6b76), UINT64_C(0x8955691bf05f7137), UINT64_C(0x9cc3fb3068218708), UINT64_C(0xf42205e2a15945b5), UINT64_C(0xe1b497c93927b38a), UINT64_C(0xdf0f21b591a4a9cb), UINT64_C(0xca99b39e09da5ff4), UINT64_C(0xc57d51822bb451c4), UINT64_C(0xd0ebc3a9b3caa7fb), UINT64_C(0xee5075d51b49bdba), UINT64_C(0xfbc6e7fe83374b85), UINT64_C(0x9327192c4a4f8938), UINT64_C(0x86b18b07d2317f07), UINT64_C(0xb80a3d7b7ab26546), UINT64_C(0xad9caf50e2cc9379), UINT64_C(0x69c9c0dee843e03c), UINT64_C(0x7c5f52f5703d1603), UINT64_C(0x42e4e489d8be0c42), UINT64_C(0x577276a240c0fa7d), UINT64_C(0x3f93887089b838c0), UINT64_C(0x2a051a5b11c6ceff), UINT64_C(0x14beac27b945d4be), UINT64_C(0x01283e0c213b2281), UINT64_C(0x16eed03ffb3391bc), UINT64_C(0x03784214634d6783), UINT64_C(0x3dc3f468cbce7dc2), UINT64_C(0x2855664353b08bfd), UINT64_C(0x40b498919ac84940), UINT64_C(0x55220aba02b6bf7f), UINT64_C(0x6b99bcc6aa35a53e), UINT64_C(0x7e0f2eed324b5301), UINT64_C(0xba5a416338c42044), UINT64_C(0xafccd348a0bad67b), UINT64_C(0x917765340839cc3a), UINT64_C(0x84e1f71f90473a05), UINT64_C(0xec0009cd593ff8b8), UINT64_C(0xf9969be6c1410e87), UINT64_C(0xc72d2d9a69c214c6), UINT64_C(0xd2bbbfb1f1bce2f9), UINT64_C(0xdd5f5dadd3d2ecc9), UINT64_C(0xc8c9cf864bac1af6), UINT64_C(0xf67279fae32f00b7), 
UINT64_C(0xe3e4ebd17b51f688), UINT64_C(0x8b051503b2293435), UINT64_C(0x9e9387282a57c20a), UINT64_C(0xa028315482d4d84b), UINT64_C(0xb5bea37f1aaa2e74), UINT64_C(0x71ebccf110255d31), UINT64_C(0x647d5eda885bab0e), UINT64_C(0x5ac6e8a620d8b14f), UINT64_C(0x4f507a8db8a64770), UINT64_C(0x27b1845f71de85cd), UINT64_C(0x32271674e9a073f2), UINT64_C(0x0c9ca008412369b3), UINT64_C(0x190a3223d95d9f8c), UINT64_C(0x1355643005ff75d3), UINT64_C(0x06c3f61b9d8183ec), UINT64_C(0x38784067350299ad), UINT64_C(0x2deed24cad7c6f92), UINT64_C(0x450f2c9e6404ad2f), UINT64_C(0x5099beb5fc7a5b10), UINT64_C(0x6e2208c954f94151), UINT64_C(0x7bb49ae2cc87b76e), UINT64_C(0xbfe1f56cc608c42b), UINT64_C(0xaa7767475e763214), UINT64_C(0x94ccd13bf6f52855), UINT64_C(0x815a43106e8bde6a), UINT64_C(0xe9bbbdc2a7f31cd7), UINT64_C(0xfc2d2fe93f8deae8), UINT64_C(0xc2969995970ef0a9), UINT64_C(0xd7000bbe0f700696), UINT64_C(0xd8e4e9a22d1e08a6), UINT64_C(0xcd727b89b560fe99), UINT64_C(0xf3c9cdf51de3e4d8), UINT64_C(0xe65f5fde859d12e7), UINT64_C(0x8ebea10c4ce5d05a), UINT64_C(0x9b283327d49b2665), UINT64_C(0xa593855b7c183c24), UINT64_C(0xb0051770e466ca1b), UINT64_C(0x745078feeee9b95e), UINT64_C(0x61c6ead576974f61), UINT64_C(0x5f7d5ca9de145520), UINT64_C(0x4aebce82466aa31f), UINT64_C(0x220a30508f1261a2), UINT64_C(0x379ca27b176c979d), UINT64_C(0x09271407bfef8ddc), UINT64_C(0x1cb1862c27917be3), UINT64_C(0x1d99b82006aa5962), UINT64_C(0x080f2a0b9ed4af5d), UINT64_C(0x36b49c773657b51c), UINT64_C(0x23220e5cae294323), UINT64_C(0x4bc3f08e6751819e), UINT64_C(0x5e5562a5ff2f77a1), UINT64_C(0x60eed4d957ac6de0), UINT64_C(0x757846f2cfd29bdf), UINT64_C(0xb12d297cc55de89a), UINT64_C(0xa4bbbb575d231ea5), UINT64_C(0x9a000d2bf5a004e4), UINT64_C(0x8f969f006ddef2db), UINT64_C(0xe77761d2a4a63066), UINT64_C(0xf2e1f3f93cd8c659), UINT64_C(0xcc5a4585945bdc18), UINT64_C(0xd9ccd7ae0c252a27), UINT64_C(0xd62835b22e4b2417), UINT64_C(0xc3bea799b635d228), UINT64_C(0xfd0511e51eb6c869), UINT64_C(0xe89383ce86c83e56), UINT64_C(0x80727d1c4fb0fceb), 
UINT64_C(0x95e4ef37d7ce0ad4), UINT64_C(0xab5f594b7f4d1095), UINT64_C(0xbec9cb60e733e6aa), UINT64_C(0x7a9ca4eeedbc95ef), UINT64_C(0x6f0a36c575c263d0), UINT64_C(0x51b180b9dd417991), UINT64_C(0x44271292453f8fae), UINT64_C(0x2cc6ec408c474d13), UINT64_C(0x39507e6b1439bb2c), UINT64_C(0x07ebc817bcbaa16d), UINT64_C(0x127d5a3c24c45752), UINT64_C(0x18220c2ff866bd0d), UINT64_C(0x0db49e0460184b32), UINT64_C(0x330f2878c89b5173), UINT64_C(0x2699ba5350e5a74c), UINT64_C(0x4e784481999d65f1), UINT64_C(0x5beed6aa01e393ce), UINT64_C(0x655560d6a960898f), UINT64_C(0x70c3f2fd311e7fb0), UINT64_C(0xb4969d733b910cf5), UINT64_C(0xa1000f58a3effaca), UINT64_C(0x9fbbb9240b6ce08b), UINT64_C(0x8a2d2b0f931216b4), UINT64_C(0xe2ccd5dd5a6ad409), UINT64_C(0xf75a47f6c2142236), UINT64_C(0xc9e1f18a6a973877), UINT64_C(0xdc7763a1f2e9ce48), UINT64_C(0xd39381bdd087c078), UINT64_C(0xc605139648f93647), UINT64_C(0xf8bea5eae07a2c06), UINT64_C(0xed2837c17804da39), UINT64_C(0x85c9c913b17c1884), UINT64_C(0x905f5b382902eebb), UINT64_C(0xaee4ed448181f4fa), UINT64_C(0xbb727f6f19ff02c5), UINT64_C(0x7f2710e113707180), UINT64_C(0x6ab182ca8b0e87bf), UINT64_C(0x540a34b6238d9dfe), UINT64_C(0x419ca69dbbf36bc1), UINT64_C(0x297d584f728ba97c), UINT64_C(0x3cebca64eaf55f43), UINT64_C(0x02507c1842764502), UINT64_C(0x17c6ee33da08b33d) }, { UINT64_C(0x0000000000000000), UINT64_C(0x2ddda07ff6672378), UINT64_C(0x5bbb40ffecce46f0), UINT64_C(0x7666e0801aa96588), UINT64_C(0xb77681ffd99c8de0), UINT64_C(0x9aab21802ffbae98), UINT64_C(0xeccdc1003552cb10), UINT64_C(0xc110617fc335e868), UINT64_C(0xfc35acd41c370545), UINT64_C(0xd1e80cabea50263d), UINT64_C(0xa78eec2bf0f943b5), UINT64_C(0x8a534c54069e60cd), UINT64_C(0x4b432d2bc5ab88a5), UINT64_C(0x669e8d5433ccabdd), UINT64_C(0x10f86dd42965ce55), UINT64_C(0x3d25cdabdf02ed2d), UINT64_C(0x6ab3f6839760140f), UINT64_C(0x476e56fc61073777), UINT64_C(0x3108b67c7bae52ff), UINT64_C(0x1cd516038dc97187), UINT64_C(0xddc5777c4efc99ef), UINT64_C(0xf018d703b89bba97), UINT64_C(0x867e3783a232df1f), 
UINT64_C(0xaba397fc5455fc67), UINT64_C(0x96865a578b57114a), UINT64_C(0xbb5bfa287d303232), UINT64_C(0xcd3d1aa8679957ba), UINT64_C(0xe0e0bad791fe74c2), UINT64_C(0x21f0dba852cb9caa), UINT64_C(0x0c2d7bd7a4acbfd2), UINT64_C(0x7a4b9b57be05da5a), UINT64_C(0x57963b284862f922), UINT64_C(0xd567ed072ec0281e), UINT64_C(0xf8ba4d78d8a70b66), UINT64_C(0x8edcadf8c20e6eee), UINT64_C(0xa3010d8734694d96), UINT64_C(0x62116cf8f75ca5fe), UINT64_C(0x4fcccc87013b8686), UINT64_C(0x39aa2c071b92e30e), UINT64_C(0x14778c78edf5c076), UINT64_C(0x295241d332f72d5b), UINT64_C(0x048fe1acc4900e23), UINT64_C(0x72e9012cde396bab), UINT64_C(0x5f34a153285e48d3), UINT64_C(0x9e24c02ceb6ba0bb), UINT64_C(0xb3f960531d0c83c3), UINT64_C(0xc59f80d307a5e64b), UINT64_C(0xe84220acf1c2c533), UINT64_C(0xbfd41b84b9a03c11), UINT64_C(0x9209bbfb4fc71f69), UINT64_C(0xe46f5b7b556e7ae1), UINT64_C(0xc9b2fb04a3095999), UINT64_C(0x08a29a7b603cb1f1), UINT64_C(0x257f3a04965b9289), UINT64_C(0x5319da848cf2f701), UINT64_C(0x7ec47afb7a95d479), UINT64_C(0x43e1b750a5973954), UINT64_C(0x6e3c172f53f01a2c), UINT64_C(0x185af7af49597fa4), UINT64_C(0x358757d0bf3e5cdc), UINT64_C(0xf49736af7c0bb4b4), UINT64_C(0xd94a96d08a6c97cc), UINT64_C(0xaf2c765090c5f244), UINT64_C(0x82f1d62f66a2d13c), UINT64_C(0x38177525f28e4eb9), UINT64_C(0x15cad55a04e96dc1), UINT64_C(0x63ac35da1e400849), UINT64_C(0x4e7195a5e8272b31), UINT64_C(0x8f61f4da2b12c359), UINT64_C(0xa2bc54a5dd75e021), UINT64_C(0xd4dab425c7dc85a9), UINT64_C(0xf907145a31bba6d1), UINT64_C(0xc422d9f1eeb94bfc), UINT64_C(0xe9ff798e18de6884), UINT64_C(0x9f99990e02770d0c), UINT64_C(0xb2443971f4102e74), UINT64_C(0x7354580e3725c61c), UINT64_C(0x5e89f871c142e564), UINT64_C(0x28ef18f1dbeb80ec), UINT64_C(0x0532b88e2d8ca394), UINT64_C(0x52a483a665ee5ab6), UINT64_C(0x7f7923d9938979ce), UINT64_C(0x091fc35989201c46), UINT64_C(0x24c263267f473f3e), UINT64_C(0xe5d20259bc72d756), UINT64_C(0xc80fa2264a15f42e), UINT64_C(0xbe6942a650bc91a6), UINT64_C(0x93b4e2d9a6dbb2de), UINT64_C(0xae912f7279d95ff3), 
UINT64_C(0x834c8f0d8fbe7c8b), UINT64_C(0xf52a6f8d95171903), UINT64_C(0xd8f7cff263703a7b), UINT64_C(0x19e7ae8da045d213), UINT64_C(0x343a0ef25622f16b), UINT64_C(0x425cee724c8b94e3), UINT64_C(0x6f814e0dbaecb79b), UINT64_C(0xed709822dc4e66a7), UINT64_C(0xc0ad385d2a2945df), UINT64_C(0xb6cbd8dd30802057), UINT64_C(0x9b1678a2c6e7032f), UINT64_C(0x5a0619dd05d2eb47), UINT64_C(0x77dbb9a2f3b5c83f), UINT64_C(0x01bd5922e91cadb7), UINT64_C(0x2c60f95d1f7b8ecf), UINT64_C(0x114534f6c07963e2), UINT64_C(0x3c989489361e409a), UINT64_C(0x4afe74092cb72512), UINT64_C(0x6723d476dad0066a), UINT64_C(0xa633b50919e5ee02), UINT64_C(0x8bee1576ef82cd7a), UINT64_C(0xfd88f5f6f52ba8f2), UINT64_C(0xd0555589034c8b8a), UINT64_C(0x87c36ea14b2e72a8), UINT64_C(0xaa1ecedebd4951d0), UINT64_C(0xdc782e5ea7e03458), UINT64_C(0xf1a58e2151871720), UINT64_C(0x30b5ef5e92b2ff48), UINT64_C(0x1d684f2164d5dc30), UINT64_C(0x6b0eafa17e7cb9b8), UINT64_C(0x46d30fde881b9ac0), UINT64_C(0x7bf6c275571977ed), UINT64_C(0x562b620aa17e5495), UINT64_C(0x204d828abbd7311d), UINT64_C(0x0d9022f54db01265), UINT64_C(0xcc80438a8e85fa0d), UINT64_C(0xe15de3f578e2d975), UINT64_C(0x973b0375624bbcfd), UINT64_C(0xbae6a30a942c9f85), UINT64_C(0x702eea4be51c9d72), UINT64_C(0x5df34a34137bbe0a), UINT64_C(0x2b95aab409d2db82), UINT64_C(0x06480acbffb5f8fa), UINT64_C(0xc7586bb43c801092), UINT64_C(0xea85cbcbcae733ea), UINT64_C(0x9ce32b4bd04e5662), UINT64_C(0xb13e8b342629751a), UINT64_C(0x8c1b469ff92b9837), UINT64_C(0xa1c6e6e00f4cbb4f), UINT64_C(0xd7a0066015e5dec7), UINT64_C(0xfa7da61fe382fdbf), UINT64_C(0x3b6dc76020b715d7), UINT64_C(0x16b0671fd6d036af), UINT64_C(0x60d6879fcc795327), UINT64_C(0x4d0b27e03a1e705f), UINT64_C(0x1a9d1cc8727c897d), UINT64_C(0x3740bcb7841baa05), UINT64_C(0x41265c379eb2cf8d), UINT64_C(0x6cfbfc4868d5ecf5), UINT64_C(0xadeb9d37abe0049d), UINT64_C(0x80363d485d8727e5), UINT64_C(0xf650ddc8472e426d), UINT64_C(0xdb8d7db7b1496115), UINT64_C(0xe6a8b01c6e4b8c38), UINT64_C(0xcb751063982caf40), UINT64_C(0xbd13f0e38285cac8), 
UINT64_C(0x90ce509c74e2e9b0), UINT64_C(0x51de31e3b7d701d8), UINT64_C(0x7c03919c41b022a0), UINT64_C(0x0a65711c5b194728), UINT64_C(0x27b8d163ad7e6450), UINT64_C(0xa549074ccbdcb56c), UINT64_C(0x8894a7333dbb9614), UINT64_C(0xfef247b32712f39c), UINT64_C(0xd32fe7ccd175d0e4), UINT64_C(0x123f86b31240388c), UINT64_C(0x3fe226cce4271bf4), UINT64_C(0x4984c64cfe8e7e7c), UINT64_C(0x6459663308e95d04), UINT64_C(0x597cab98d7ebb029), UINT64_C(0x74a10be7218c9351), UINT64_C(0x02c7eb673b25f6d9), UINT64_C(0x2f1a4b18cd42d5a1), UINT64_C(0xee0a2a670e773dc9), UINT64_C(0xc3d78a18f8101eb1), UINT64_C(0xb5b16a98e2b97b39), UINT64_C(0x986ccae714de5841), UINT64_C(0xcffaf1cf5cbca163), UINT64_C(0xe22751b0aadb821b), UINT64_C(0x9441b130b072e793), UINT64_C(0xb99c114f4615c4eb), UINT64_C(0x788c703085202c83), UINT64_C(0x5551d04f73470ffb), UINT64_C(0x233730cf69ee6a73), UINT64_C(0x0eea90b09f89490b), UINT64_C(0x33cf5d1b408ba426), UINT64_C(0x1e12fd64b6ec875e), UINT64_C(0x68741de4ac45e2d6), UINT64_C(0x45a9bd9b5a22c1ae), UINT64_C(0x84b9dce4991729c6), UINT64_C(0xa9647c9b6f700abe), UINT64_C(0xdf029c1b75d96f36), UINT64_C(0xf2df3c6483be4c4e), UINT64_C(0x48399f6e1792d3cb), UINT64_C(0x65e43f11e1f5f0b3), UINT64_C(0x1382df91fb5c953b), UINT64_C(0x3e5f7fee0d3bb643), UINT64_C(0xff4f1e91ce0e5e2b), UINT64_C(0xd292beee38697d53), UINT64_C(0xa4f45e6e22c018db), UINT64_C(0x8929fe11d4a73ba3), UINT64_C(0xb40c33ba0ba5d68e), UINT64_C(0x99d193c5fdc2f5f6), UINT64_C(0xefb77345e76b907e), UINT64_C(0xc26ad33a110cb306), UINT64_C(0x037ab245d2395b6e), UINT64_C(0x2ea7123a245e7816), UINT64_C(0x58c1f2ba3ef71d9e), UINT64_C(0x751c52c5c8903ee6), UINT64_C(0x228a69ed80f2c7c4), UINT64_C(0x0f57c9927695e4bc), UINT64_C(0x793129126c3c8134), UINT64_C(0x54ec896d9a5ba24c), UINT64_C(0x95fce812596e4a24), UINT64_C(0xb821486daf09695c), UINT64_C(0xce47a8edb5a00cd4), UINT64_C(0xe39a089243c72fac), UINT64_C(0xdebfc5399cc5c281), UINT64_C(0xf36265466aa2e1f9), UINT64_C(0x850485c6700b8471), UINT64_C(0xa8d925b9866ca709), UINT64_C(0x69c944c645594f61), 
UINT64_C(0x4414e4b9b33e6c19), UINT64_C(0x32720439a9970991), UINT64_C(0x1fafa4465ff02ae9), UINT64_C(0x9d5e72693952fbd5), UINT64_C(0xb083d216cf35d8ad),
UINT64_C(0xc6e53296d59cbd25), UINT64_C(0xeb3892e923fb9e5d), UINT64_C(0x2a28f396e0ce7635), UINT64_C(0x07f553e916a9554d), UINT64_C(0x7193b3690c0030c5),
UINT64_C(0x5c4e1316fa6713bd), UINT64_C(0x616bdebd2565fe90), UINT64_C(0x4cb67ec2d302dde8), UINT64_C(0x3ad09e42c9abb860), UINT64_C(0x170d3e3d3fcc9b18),
UINT64_C(0xd61d5f42fcf97370), UINT64_C(0xfbc0ff3d0a9e5008), UINT64_C(0x8da61fbd10373580), UINT64_C(0xa07bbfc2e65016f8), UINT64_C(0xf7ed84eaae32efda),
UINT64_C(0xda3024955855cca2), UINT64_C(0xac56c41542fca92a), UINT64_C(0x818b646ab49b8a52), UINT64_C(0x409b051577ae623a), UINT64_C(0x6d46a56a81c94142),
UINT64_C(0x1b2045ea9b6024ca), UINT64_C(0x36fde5956d0707b2), UINT64_C(0x0bd8283eb205ea9f), UINT64_C(0x260588414462c9e7), UINT64_C(0x506368c15ecbac6f),
UINT64_C(0x7dbec8bea8ac8f17), UINT64_C(0xbcaea9c16b99677f), UINT64_C(0x917309be9dfe4407), UINT64_C(0xe715e93e8757218f), UINT64_C(0xcac84941713002f7) } };

// Byte-at-a-time ("slow") CRC-64 over nbytes bytes of input.
// Standard table-driven form: the running checksum starts at all-ones,
// folds in one input byte per iteration, and is finalized by XOR with
// all-ones again.  The `cs >> 8` step makes this the reflected
// (LSB-first) variant of the algorithm.
// NOTE(review): the lookup uses sub-table crc64_table[3]; presumably the
// table layout places the plain one-byte-step table at index 3 -- confirm
// against the crc64_table initializer (its start is not visible here).
static inline uint64_t crc64_slow(const void *input, size_t nbytes)
{
    const unsigned char *data = (const unsigned char*) input;
    uint64_t cs = UINT64_C(0xffffffffffffffff);
    while (nbytes--) {
        // Fold the next byte into the low byte of the checksum, then
        // advance by one table lookup.
        uint32_t idx = ((uint32_t) (cs ^ *data++)) & 0xff;
        cs = crc64_table[3][idx] ^ (cs >> 8);
    }
    return cs ^ UINT64_C(0xffffffffffffffff);
}

// Loads an input 32-bit word in little-endian order from a big-endian machine.
static inline uint32_t crc64_load_le32_(const uint32_t *p)
{
#ifdef __ppc__
    // PowerPC: use the byte-reversed load instruction directly.
    // See: http://hardwarebug.org/2008/10/25/gcc-inline-asm-annoyance/
    uint32_t v;
    asm ("lwbrx %0, %y1" : "=r"(v) : "Z"(*p));
    return v;
#else
    // Generic fallback: plain aligned load followed by an explicit
    // four-byte swap.
    uint32_t w = *p;
    return ((((w) & 0xff000000) >> 24) | (((w) & 0x00ff0000) >> 8) |
            (((w) & 0x0000ff00) << 8) | (((w) & 0x000000ff) << 24));
#endif
}

// A parallel multiword interleaved algorithm with a word size of 4 bytes
// and a stride factor of 5.
// Compute the CRC-64 of `input`. Fast path: a parallel multiword interleaved
// algorithm, word size 4 bytes, stride factor 5 (20 bytes per iteration),
// tracking five partial checksums cs[0..4] that are folded together at the
// end. Unaligned prefixes, short buffers, and trailing bytes fall back to
// the one-byte-at-a-time table method (same as crc64_slow).
static inline uint64_t crc64(const void *input, size_t nbytes) {
    const unsigned char *data = (const unsigned char*) input;
    const unsigned char *end = data + nbytes;
    // cs[0] carries the running CRC (pre-inverted); cs[1..4] are the
    // interleaved partial sums for the other four word lanes.
    uint64_t cs[5] = { UINT64_C(0xffffffffffffffff), 0, 0, 0, 0 };

    // Process byte-by-byte until proper alignment is attained.
    // In the inner loop, we process 5 4-byte words (20 bytes in total)
    // per iteration. If the amount of data remaining is small,
    // then we also use the slow algorithm.
    while (data < end && ((((size_t) data) & 3) || (end - data < 20))) {
        uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff;
        cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8);
    }

    if (data == end)
        return cs[0] ^ UINT64_C(0xffffffffffffffff);

    // Runtime endianness probe: on big-endian hosts the input words must be
    // byte-swapped on load (crc64_load_le32_) so lane arithmetic matches the
    // little-endian table layout.
    const uint32_t one = 1;
    bool big_endian = !(*((char *)(&one)));

    uint64_t cry = 0;   // carry: high half of lane 4, folded into lane 0 next round
    uint32_t in[5];
    if (!big_endian) {
        // Prime the five lanes with the first 20 aligned bytes.
        for (unsigned i = 0; i < 5; ++i)
            in[i] = ((const uint32_t*) data)[i];
        data += 20;
        for (; end - data >= 20; data += 20) {
            // First table stage: mix each lane's low byte, propagate the high
            // 32 bits of each checksum into the next lane (lane 4 -> cry).
            cs[0] ^= cry;
            in[0] ^= (uint32_t) cs[0];
            cs[1] ^= cs[0] >> 32;
            cs[0] = crc64_interleaved_table[0][in[0] & 0xff];
            in[0] >>= 8;
            in[1] ^= (uint32_t) cs[1];
            cs[2] ^= cs[1] >> 32;
            cs[1] = crc64_interleaved_table[0][in[1] & 0xff];
            in[1] >>= 8;
            in[2] ^= (uint32_t) cs[2];
            cs[3] ^= cs[2] >> 32;
            cs[2] = crc64_interleaved_table[0][in[2] & 0xff];
            in[2] >>= 8;
            in[3] ^= (uint32_t) cs[3];
            cs[4] ^= cs[3] >> 32;
            cs[3] = crc64_interleaved_table[0][in[3] & 0xff];
            in[3] >>= 8;
            in[4] ^= (uint32_t) cs[4];
            cry = cs[4] >> 32;
            cs[4] = crc64_interleaved_table[0][in[4] & 0xff];
            in[4] >>= 8;
            // Middle table stages for word bytes 1 and 2.
            for (unsigned b = 1; b < 3; ++b) {
                cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff];
                in[0] >>= 8;
                cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff];
                in[1] >>= 8;
                cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff];
                in[2] >>= 8;
                cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff];
                in[3] >>= 8;
                cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff];
                in[4] >>= 8;
            }
            // Final table stage (byte 3), interleaved with loading the next
            // 20 bytes into the lanes.
            cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff];
            in[0] = ((const uint32_t*) data)[0];
            cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff];
            in[1] = ((const uint32_t*) data)[1];
            cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff];
            in[2] = ((const uint32_t*) data)[2];
            cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff];
            in[3] = ((const uint32_t*) data)[3];
            cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff];
            in[4] = ((const uint32_t*) data)[4];
        }
    } else {
        // Big-endian variant: identical schedule, but every 32-bit load goes
        // through crc64_load_le32_ to byte-swap into little-endian order.
        for (unsigned i = 0; i < 5; ++i) {
            in[i] = crc64_load_le32_(&((const uint32_t*) data)[i]);
        }
        data += 20;
        for (; end - data >= 20; data += 20) {
            cs[0] ^= cry;
            in[0] ^= (uint32_t) cs[0];
            cs[1] ^= cs[0] >> 32;
            cs[0] = crc64_interleaved_table[0][in[0] & 0xff];
            in[0] >>= 8;
            in[1] ^= (uint32_t) cs[1];
            cs[2] ^= cs[1] >> 32;
            cs[1] = crc64_interleaved_table[0][in[1] & 0xff];
            in[1] >>= 8;
            in[2] ^= (uint32_t) cs[2];
            cs[3] ^= cs[2] >> 32;
            cs[2] = crc64_interleaved_table[0][in[2] & 0xff];
            in[2] >>= 8;
            in[3] ^= (uint32_t) cs[3];
            cs[4] ^= cs[3] >> 32;
            cs[3] = crc64_interleaved_table[0][in[3] & 0xff];
            in[3] >>= 8;
            in[4] ^= (uint32_t) cs[4];
            cry = cs[4] >> 32;
            cs[4] = crc64_interleaved_table[0][in[4] & 0xff];
            in[4] >>= 8;
            for (unsigned b = 1; b < 3; ++b) {
                cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff];
                in[0] >>= 8;
                cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff];
                in[1] >>= 8;
                cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff];
                in[2] >>= 8;
                cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff];
                in[3] >>= 8;
                cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff];
                in[4] >>= 8;
            }
            cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff];
            in[0] = crc64_load_le32_(&((const uint32_t*) data)[0]);
            cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff];
            in[1] = crc64_load_le32_(&((const uint32_t*) data)[1]);
            cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff];
            in[2] = crc64_load_le32_(&((const uint32_t*) data)[2]);
            cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff];
            in[3] = crc64_load_le32_(&((const uint32_t*) data)[3]);
            cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff];
            in[4] = crc64_load_le32_(&((const uint32_t*) data)[4]);
        }
    }

    // Fold the five lanes (last loaded words still pending in in[0..4])
    // back into a single running CRC in cs[0], using the plain tables.
    cs[0] ^= cry;
    for (unsigned i = 0; i < 5; ++i) {
        if (i > 0)
            cs[0] ^= cs[i];
        in[i] ^= (uint32_t) cs[0];
        cs[0] = cs[0] >> 32;
        for (unsigned b = 0; b < 3; ++b) {
            cs[0] ^= crc64_table[b][in[i] & 0xff];
            in[i] >>= 8;
        }
        cs[0] ^= crc64_table[3][in[i] & 0xff];
    }

    // Trailing bytes (< 20) via the slow byte-at-a-time method.
    while (data < end) {
        uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff;
        cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8);
    }
    return cs[0] ^ UINT64_C(0xffffffffffffffff);
}

// Calculate the 'check bytes' for the provided checksum. If these bytes are
// appended to the original buffer, then the new total checksum should be zero.
// `buffer` must have room for 8 bytes; they are written least-significant
// byte first.
static inline void crc64_invert(uint64_t cs, void *buffer) {
    unsigned char *bytes = (unsigned char *) buffer;
    cs ^= UINT64_C(0xffffffffffffffff);
    // The CRC is self-inverting (in big-endian, so the bit-reversed CRC is
    // self-inverting in little-endian).
    bytes[7] = (cs >> 56) & 0xff;
    bytes[6] = (cs >> 48) & 0xff;
    bytes[5] = (cs >> 40) & 0xff;
    bytes[4] = (cs >> 32) & 0xff;
    bytes[3] = (cs >> 24) & 0xff;
    bytes[2] = (cs >> 16) & 0xff;
    bytes[1] = (cs >> 8) & 0xff;
    bytes[0] = cs & 0xff;
}

// Precomputed x**(2**i) mod P for i = 0..63, used by crc64_x_pow_n_ to
// compute x**n by square-and-multiply.
static const uint64_t crc64_x_pow_2n[64] = {
    UINT64_C(0x4000000000000000), UINT64_C(0x2000000000000000),
    UINT64_C(0x0800000000000000), UINT64_C(0x0080000000000000),
    UINT64_C(0x0000800000000000), UINT64_C(0x0000000080000000),
    UINT64_C(0xc96c5795d7870f42), UINT64_C(0x6d5f4ad7e3c3afa0),
    UINT64_C(0xd49f7e445077d8ea), UINT64_C(0x040fb02a53c216fa),
    UINT64_C(0x6bec35957b9ef3a0), UINT64_C(0xb0e3bb0658964afe),
    UINT64_C(0x218578c7a2dff638), UINT64_C(0x6dbb920f24dd5cf2),
    UINT64_C(0x7a140cfcdb4d5eb5), UINT64_C(0x41b3705ecbc4057b),
    UINT64_C(0xd46ab656accac1ea), UINT64_C(0x329beda6fc34fb73),
    UINT64_C(0x51a4fcd4350b9797), UINT64_C(0x314fa85637efae9d),
    UINT64_C(0xacf27e9a1518d512), UINT64_C(0xffe2a3388a4d8ce7),
    UINT64_C(0x48b9697e60cc2e4e), UINT64_C(0xada73cb78dd62460),
    UINT64_C(0x3ea5454d8ce5c1bb), UINT64_C(0x5e84e3a6c70feaf1),
    UINT64_C(0x90fd49b66cbd81d1), UINT64_C(0xe2943e0c1db254e8),
    UINT64_C(0xecfa6adeca8834a1), UINT64_C(0xf513e212593ee321),
    UINT64_C(0xf36ae57331040916), UINT64_C(0x63fbd333b87b6717),
    UINT64_C(0xbd60f8e152f50b8b), UINT64_C(0xa5ce4a8299c1567d),
    UINT64_C(0x0bd445f0cbdb55ee), UINT64_C(0xfdd6824e20134285),
    UINT64_C(0xcead8b6ebda2227a), UINT64_C(0xe44b17e4f5d4fb5c),
    UINT64_C(0x9b29c81ad01ca7c5), UINT64_C(0x1b4366e40fea4055),
    UINT64_C(0x27bca1551aae167b), UINT64_C(0xaa57bcd1b39a5690),
    UINT64_C(0xd7fce83fa1234db9), UINT64_C(0xcce4986efea3ff8e),
    UINT64_C(0x3602a4d9e65341f1), UINT64_C(0x722b1da2df516145),
    UINT64_C(0xecfc3ddd3a08da83), UINT64_C(0x0fb96dcca83507e6),
    UINT64_C(0x125f2fe78d70f080), UINT64_C(0x842f50b7651aa516),
    UINT64_C(0x09bc34188cd9836f), UINT64_C(0xf43666c84196d909),
    UINT64_C(0xb56feb30c0df6ccb), UINT64_C(0xaa66e04ce7f30958),
    UINT64_C(0xb7b1187e9af29547), UINT64_C(0x113255f8476495de),
    UINT64_C(0x8fb19f783095d77e), UINT64_C(0xaec4aacc7c82b133),
    UINT64_C(0xf64e6d09218428cf), UINT64_C(0x036a72ea5ac258a0),
    UINT64_C(0x5235ef12eb7aaa6a), UINT64_C(0x2fed7b1685657853),
    UINT64_C(0x8ef8951d46606fb5), UINT64_C(0x9d58c1090f034d14)
};

// Compute (a*b) mod P
// See: https://code.google.com/p/crcutil/source/browse/code/gf_util.h
// Carry-less polynomial multiplication in GF(2)[x] modulo the CRC
// polynomial (crc64_poly, defined earlier in this header).
static inline uint64_t crc64_multiply_(uint64_t a, uint64_t b) {
    // Iterate over the operand with fewer significant bits.
    if ((a ^ (a - 1)) < (b ^ (b - 1))) {
        uint64_t t = a;
        a = b;
        b = t;
    }
    if (a == 0)
        return 0;
    uint64_t r = 0, h = UINT64_C(1) << 63;
    for (; a != 0; a <<= 1) {
        if (a & h) {
            r ^= b;
            a ^= h;
        }
        b = (b >> 1) ^ ((b & 1) ? crc64_poly : 0);
    }
    return r;
}

// Compute x**n mod P
static inline uint64_t crc64_x_pow_n_(uint64_t n) {
    uint64_t r = UINT64_C(1) << 63;   // the polynomial "1"
    for (size_t i = 0; n != 0; n >>= 1, ++i) {
        if (n & 1)
            r = crc64_multiply_(r, crc64_x_pow_2n[i]);
    }
    return r;
}

// Combine the CRC of two consecutive buffers: given cs1 = CRC(M1) and
// cs2 = CRC(M2), with M2 nbytes2 long, returns CRC(CONCAT(M1, M2)).
static inline uint64_t crc64_combine(uint64_t cs1, uint64_t cs2, size_t nbytes2) {
    // For M = CONCAT(M1, M2) => CRC(M, a) = CRC(M2, CRC(M1, a)) and:
    // CRC(M, b) = CRC(M, a) + ((b-a)x^|M|) mod P.
    return cs2 ^ crc64_multiply_(cs1, crc64_x_pow_n_(8 * nbytes2));
}

// Minimum bytes per OpenMP worker before threading is worthwhile.
static const size_t crc64_min_thread_bytes = 1024;

// OpenMP-parallel CRC-64: split the buffer across threads, CRC each chunk
// independently, then merge with crc64_combine. Falls back to the serial
// crc64 when OpenMP is unavailable or the buffer is small.
static inline uint64_t crc64_omp(const void *input, size_t nbytes) {
#ifdef _OPENMP
    if (nbytes > 2 * crc64_min_thread_bytes) {
        int nthreads = omp_get_max_threads();
        if (nbytes < nthreads * crc64_min_thread_bytes)
            nthreads = nbytes / crc64_min_thread_bytes;
        // NOTE(review): VLAs sized by nthreads — assumes a modest thread
        // count so stack use stays bounded.
        uint64_t thread_cs[nthreads];
        size_t thread_sz[nthreads];
        const unsigned char *data = (const unsigned char*) input;
#pragma omp parallel num_threads(nthreads)
        {
            int tid = omp_get_thread_num();
            size_t bpt = nbytes / nthreads;
            const unsigned char *start = data + bpt * tid, *end;
            if (tid != nthreads - 1)
                end = start + bpt;
            else
                end = data + nbytes;   // last thread takes the remainder
            size_t sz = end - start;
            thread_sz[tid] = sz;
            thread_cs[tid] = crc64(start, sz);
        }
        uint64_t cs = thread_cs[0];
        for (int i = 1; i < nthreads; ++i) {
            cs = crc64_combine(cs, thread_cs[i], thread_sz[i]);
        }
        return cs;
    }
#endif
    return crc64(input, nbytes);
}

#endif // CRC64_H
firstlastprivate.c
/*
 * firstlastprivate.c
 *
 * Created on: 02/04/2014
 * Author: Carlos de la Torre
 *
 * Demo of the OpenMP firstprivate/lastprivate clauses: each thread's
 * private `suma` starts from the value it had before the parallel loop,
 * and the value from the last logical iteration is copied back out.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

int main() {
    const int n = 7;
    int a[7];
    int suma = 0;

    for (int k = 0; k < n; k++) {
        a[k] = k;
    }

    int i;
    #pragma omp parallel for firstprivate(suma) lastprivate(suma)
    for (i = 0; i < n; i++) {
        suma += a[i];
        printf(" thread %d suma a[%d] suma=%d \n",omp_get_thread_num(), i, suma);
    }

    // After the loop, `suma` holds the value from the iteration i == n-1.
    printf("\nFuera de la construcción parallel suma=%d\n", suma);
    return 0;
}
matrix_multiplication.c
#include <stdio.h>
#include <omp.h>
#include <time.h>

// Multiplies the fixed 3x3 matrix `arr` by itself and stores the product in
// `ans`, parallelizing the outer (row) loop with OpenMP, then prints the
// elapsed time and the result.
//
// FIX(review): the original inner loops computed
//     res += arr[i][j] * arr[j][i]
// without resetting `res` between `k` iterations, so ans[i][k] held
// (k+1) * sum_j(arr[i][j]*arr[j][i]) instead of the matrix product.
// Corrected to ans[i][k] = sum_j arr[i][j] * arr[j][k], resetting the
// accumulator per output element.
int main()
{
    int arr[][3] = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
    int ans[3][3];

    // NOTE(review): clock() measures accumulated CPU time, not wall-clock
    // time; with multiple OpenMP threads this overstates elapsed time.
    clock_t start = clock();

    #pragma omp parallel
    {
        #pragma omp for
        for (int i = 0; i < 3; i++)
        {
            for (int k = 0; k < 3; k++)
            {
                int res = 0;   // accumulator for one output element
                for (int j = 0; j < 3; j++)
                {
                    res += arr[i][j] * arr[j][k];
                }
                ans[i][k] = res;
            }
        }
    }

    clock_t end = clock();
    clock_t diff = end - start;
    double time_taken = ((double)diff) / CLOCKS_PER_SEC;
    printf("Execution Time : %f seconds\n", time_taken);

    // Print the matrix from a single thread.
    #pragma omp parallel
    #pragma omp single
    {
        for (int i = 0; i < 3; i++)
        {
            for (int j = 0; j < 3; j++)
            {
                printf("%d ", ans[i][j]);
            }
            printf("\n");
        }
    }
}

/*
 Since the matrix size is fixed at 3, the "#pragma omp for" worksharing
 loop above could equivalently be written as three "#pragma omp section"
 blocks inside "#pragma omp parallel sections", one per row index i, each
 running the same (corrected) k/j loops for its row.
*/
9816.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute schedule(static, 14) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute schedule(static, 14) for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute schedule(static, 14) for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
helloThreads.c
// Compile with
// gcc -fopenmp -o helloT helloThreads.c
#include <omp.h>
#include <stdio.h>

// Spawns the default OpenMP thread team; every thread prints its own id
// and the size of the team.
int main() {
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int team_size = omp_get_num_threads();
        printf("Hello from thread %d, nthreads %d\n", tid, team_size);
    }
}
// gcc -fopenmp -o helloThreads helloThreads.c
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2015 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. 
The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. // void Write(int residual_id, // int residual_offset, // double** jacobians, // SparseMatrix* jacobian); // } // // Note: The ProgramEvaluator is not thread safe, since internally it maintains // some per-thread scratch space. 
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"

namespace ceres {
namespace internal {

// Default JacobianFinalizer: performs no post-processing on the jacobian.
struct NullJacobianFinalizer {
  void operator()(SparseMatrix* jacobian, int num_parameters) {}
};

template<typename EvaluatePreparer,
         typename JacobianWriter,
         typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
 public:
  // Does not take ownership of `program`; `program` must outlive this object.
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    if (options_.num_threads > 1) {
      LOG(WARNING)
          << "OpenMP support is not compiled into this binary; "
          << "only options.num_threads = 1 is supported. Switching "
          << "to single threaded mode.";
      options_.num_threads = 1;
    }
#endif
    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  // Evaluates cost and, when the corresponding output pointers are non-NULL,
  // residuals, gradient, and jacobian at `state`. Residual blocks are
  // processed in parallel (OpenMP); per-thread partial results are summed
  // afterwards. Returns false if setting the state or evaluating any
  // residual block fails.
  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(
        gradient == NULL && jacobian == NULL
            ? "Evaluator::Residual"
            : "Evaluator::Jacobian",
        &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
      // Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        // Residuals are still needed to form the gradient; use scratch.
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              evaluate_options.apply_loss_function,
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
        // This ensures that the OpenMP threads have a consistent view of
        // 'abort'. Do the flush inside the failure case so that there is
        // usually only one synchronization point per loop iteration instead
        // of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }
          // gradient contribution: J_j^T * r for this residual block.
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              block_jacobians[j],
              num_residuals,
              parameter_block->LocalSize(),
              block_residuals,
              scratch->gradient.get() + parameter_block->delta_offset());
        }
      }
    }

    if (!abort) {
      const int num_parameters = program_->NumEffectiveParameters();

      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }

      // Finalize the Jacobian if it is available.
      // `num_parameters` is passed to the finalizer so that additional
      // storage can be reserved for additional diagonal elements if
      // necessary.
      if (jacobian != NULL) {
        JacobianFinalizer f;
        f(jacobian, num_parameters);
      }
    }
    return !abort;
  }

  // Delegates the plus operation (state + delta) to the program.
  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

  virtual std::map<std::string, int> CallStatistics() const {
    return execution_summary_.calls();
  }

  virtual std::map<std::string, double> TimeStatistics() const {
    return execution_summary_.times();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    scoped_array<double*> jacobian_block_ptrs;
  };

  // Fills residual_layout with the offset of each residual block's first
  // residual within the flat residual vector.
  static void BuildResidualLayout(const Program& program,
                                  std::vector<int>* residual_layout) {
    const std::vector<ResidualBlock*>& residual_blocks =
        program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  // Caller owns the returned array (stored in evaluate_scratch_).
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  std::vector<int> residual_layout_;
  ::ceres::internal::ExecutionSummary execution_summary_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
GB_binop__bshift_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint16) // C=scalar+B GB (_bind1st__bshift_uint16) // C=scalar+B' GB (_bind1st_tran__bshift_uint16) // C=A+scalar GB (_bind2nd__bshift_uint16) // C=A'+scalar GB (_bind2nd_tran__bshift_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: int8_t // B pattern? 
0 // BinaryOp: cij = GB_bitshift_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_bitshift_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bshift_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_bitshift_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_bitshift_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_bool_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_int8)
// op(A') function:  GB (_unop_tran__identity_bool_int8)

// C type:   bool
// A type:   int8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    int8_t

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]: fetch one entry of A
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so z is just x
#define GB_OP(z, x) \
    z = x ;

// casting from the A type (int8_t) to the C type (bool)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij): cast one entry of A and store it in C; used by the
// transpose template included below
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    int8_t aij = Ax [pA] ;                 \
    /* Cx [pC] = op (cast (aij)) */        \
    bool z = (bool) aij ;                  \
    Cx [pC] = z ;                          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator (with an int8_t -> bool typecast) to every
// entry of A, writing the result into Cx.  Cx and Ax may be aliased.  The
// return value is GrB_NO_VALUE when the kernel is compiled out (GB_DISABLE),
// which tells the caller to fall back to the generic apply.

GrB_Info GB (_unop_apply__identity_bool_int8)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size) to process
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every slot of Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots not present in the bitmap
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose work itself lives in the shared template
// GB_unop_transpose.c, which uses the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_bool_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace row counters
    const int64_t *restrict A_slice,    // partition of A's vectors into tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
jacobi-2d-imper_orig_tileonly_size16_tile_8_2_1.c
/** * jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 20x1000. */ #include "jacobi-2d-imper.h" /* Array initialization. */ static void init_array(int n,double A[16 + 0][16 + 0],double B[16 + 0][16 + 0]) { int i; int j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { A[i][j] = (((double )i) * (j + 2) + 2) / n; B[i][j] = (((double )i) * (j + 3) + 3) / n; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n,double A[16 + 0][16 + 0]) { int i; int j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr,"%0.2lf ",A[i][j]); if ((i * n + j) % 20 == 0) { fprintf(stderr,"\n"); } } fprintf(stderr,"\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ #pragma verify dataracefree tsteps==2 n==16 static void kernel_jacobi_2d_imper(int tsteps,int n,double A[16 + 0][16 + 0],double B[16 + 0][16 + 0]) { int t; int i; int j; #pragma scop { int c2; int c0; int c1; int c3; int c4; if (n >= 3 && tsteps >= 1) { for (c0 = 0; c0 <= (((4 * n + 9 * tsteps + -13) * 8 < 0?((8 < 0?-((-(4 * n + 9 * tsteps + -13) + 8 + 1) / 8) : -((-(4 * n + 9 * tsteps + -13) + 8 - 1) / 8))) : (4 * n + 9 * tsteps + -13) / 8)); c0++) { #pragma omp parallel for private(c4, c3, c2) for (c1 = ((((8 * c0 + -1 * tsteps + 1) * 8 < 0?-(-(8 * c0 + -1 * tsteps + 1) / 8) : ((8 < 0?(-(8 * c0 + -1 * tsteps + 1) + - 8 - 1) / - 8 : (8 * c0 + -1 * tsteps + 1 + 8 - 1) / 8)))) > ((8 * c0 * 9 < 0?-(-(8 * c0) / 9) : ((9 < 0?(-(8 * c0) + - 9 - 1) / - 9 : (8 * c0 + 9 - 1) / 9))))?(((8 * c0 + -1 * tsteps + 1) * 8 < 0?-(-(8 * c0 + -1 * tsteps + 1) / 8) : ((8 < 0?(-(8 * c0 + -1 * tsteps + 1) + - 8 - 1) / - 8 : (8 * c0 + -1 * tsteps + 1 + 8 - 1) / 8)))) : ((8 * c0 * 9 < 0?-(-(8 * c0) / 9) : ((9 < 0?(-(8 * c0) + - 9 - 1) / - 9 : (8 * c0 + 9 - 1) / 9))))); c1 <= (((((((n + 2 * tsteps + -3) * 2 < 0?((2 < 0?-((-(n + 2 * tsteps + -3) + 2 + 1) / 2) : -((-(n + 2 * tsteps + -3) + 2 - 1) / 2))) : (n + 2 * tsteps + -3) / 2)) < (((16 * c0 + n + 13) * 18 < 0?((18 < 0?-((-(16 * c0 + n + 13) + 18 + 1) / 18) : -((-(16 * c0 + n + 13) + 18 - 1) / 18))) : (16 * c0 + n + 13) / 18))?(((n + 2 * tsteps + -3) * 2 < 0?((2 < 0?-((-(n + 2 * tsteps + -3) + 2 + 1) / 2) : -((-(n + 2 * tsteps + -3) + 2 - 1) / 2))) : (n + 2 * tsteps + -3) / 2)) : (((16 * c0 + n + 13) * 18 < 0?((18 < 0?-((-(16 * c0 + n + 13) + 18 + 1) / 18) : -((-(16 * c0 + n + 13) + 18 - 1) / 18))) : (16 * c0 + n + 13) / 18)))) < c0?(((((n + 2 * tsteps + -3) * 2 < 0?((2 < 0?-((-(n + 2 * tsteps + -3) + 2 + 1) / 2) : -((-(n + 2 * tsteps + -3) + 2 - 1) / 2))) : (n + 2 * tsteps + -3) / 2)) < (((16 * c0 + n + 13) * 18 < 0?((18 < 0?-((-(16 * c0 + n + 13) + 18 + 1) / 18) : -((-(16 * c0 + n + 13) + 18 - 1) / 18))) : (16 * c0 + n + 13) / 
18))?(((n + 2 * tsteps + -3) * 2 < 0?((2 < 0?-((-(n + 2 * tsteps + -3) + 2 + 1) / 2) : -((-(n + 2 * tsteps + -3) + 2 - 1) / 2))) : (n + 2 * tsteps + -3) / 2)) : (((16 * c0 + n + 13) * 18 < 0?((18 < 0?-((-(16 * c0 + n + 13) + 18 + 1) / 18) : -((-(16 * c0 + n + 13) + 18 - 1) / 18))) : (16 * c0 + n + 13) / 18)))) : c0)); c1++) { for (c2 = (16 * c0 + -16 * c1 + 1 > 2 * c1 + -1 * n + 3?16 * c0 + -16 * c1 + 1 : 2 * c1 + -1 * n + 3); c2 <= ((((2 * c1 + n + -2 < n + 2 * tsteps + -3?2 * c1 + n + -2 : n + 2 * tsteps + -3)) < 16 * c0 + -16 * c1 + n + 13?((2 * c1 + n + -2 < n + 2 * tsteps + -3?2 * c1 + n + -2 : n + 2 * tsteps + -3)) : 16 * c0 + -16 * c1 + n + 13)); c2++) { if (c0 <= (((16 * c1 + c2 + -1 * n + 1) * 16 < 0?((16 < 0?-((-(16 * c1 + c2 + -1 * n + 1) + 16 + 1) / 16) : -((-(16 * c1 + c2 + -1 * n + 1) + 16 - 1) / 16))) : (16 * c1 + c2 + -1 * n + 1) / 16)) && c1 <= (((c2 + -1) * 2 < 0?((2 < 0?-((-(c2 + -1) + 2 + 1) / 2) : -((-(c2 + -1) + 2 - 1) / 2))) : (c2 + -1) / 2))) { if ((c2 + n + 1) % 2 == 0) { for (c4 = 2 * c1; c4 <= 2 * c1 + 1; c4++) { A[-1 * c2 + c4 + n + -2][n + -2] = B[-1 * c2 + c4 + n + -2][n + -2]; } } } if (c0 <= (((18 * c1 + -1 * n + 1) * 16 < 0?((16 < 0?-((-(18 * c1 + -1 * n + 1) + 16 + 1) / 16) : -((-(18 * c1 + -1 * n + 1) + 16 - 1) / 16))) : (18 * c1 + -1 * n + 1) / 16)) && c1 >= ((c2 * 2 < 0?-(-c2 / 2) : ((2 < 0?(-c2 + - 2 - 1) / - 2 : (c2 + 2 - 1) / 2))))) { if ((n + 1) % 2 == 0) { A[n + -2][-2 * c1 + c2 + n + -2] = B[n + -2][-2 * c1 + c2 + n + -2]; } } for (c3 = ((((((2 * c1 + -1 * n + 2) * 2 < 0?-(-(2 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(2 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (2 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((c2 + -1 * n + 2) * 2 < 0?-(-(c2 + -1 * n + 2) / 2) : ((2 < 0?(-(c2 + -1 * n + 2) + - 2 - 1) / - 2 : (c2 + -1 * n + 2 + 2 - 1) / 2))))?(((2 * c1 + -1 * n + 2) * 2 < 0?-(-(2 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(2 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (2 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((c2 + -1 * n + 2) * 2 < 0?-(-(c2 + -1 * n + 
2) / 2) : ((2 < 0?(-(c2 + -1 * n + 2) + - 2 - 1) / - 2 : (c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 8 * c0 + -8 * c1?(((((2 * c1 + -1 * n + 2) * 2 < 0?-(-(2 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(2 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (2 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((c2 + -1 * n + 2) * 2 < 0?-(-(c2 + -1 * n + 2) / 2) : ((2 < 0?(-(c2 + -1 * n + 2) + - 2 - 1) / - 2 : (c2 + -1 * n + 2 + 2 - 1) / 2))))?(((2 * c1 + -1 * n + 2) * 2 < 0?-(-(2 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(2 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (2 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((c2 + -1 * n + 2) * 2 < 0?-(-(c2 + -1 * n + 2) / 2) : ((2 < 0?(-(c2 + -1 * n + 2) + - 2 - 1) / - 2 : (c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 8 * c0 + -8 * c1); c3 <= (((((((((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) < c1 + -1?(((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) : c1 + -1)) < tsteps + -1?(((((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) < c1 + -1?(((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) : c1 + -1)) : tsteps + -1)) < 8 * c0 + -8 * c1 + 7?(((((((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) < c1 + -1?(((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) : c1 + -1)) < tsteps + -1?(((((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) < c1 + -1?(((c2 + -2) * 2 < 0?((2 < 0?-((-(c2 + -2) + 2 + 1) / 2) : -((-(c2 + -2) + 2 - 1) / 2))) : (c2 + -2) / 2)) : c1 + -1)) : tsteps + -1)) : 8 * c0 + -8 * c1 + 7)); c3++) { for (c4 = 2 * c1; c4 <= ((2 * c1 + 1 < 2 * c3 + n + -2?2 * c1 + 1 : 2 * c3 + n + -2)); c4++) { A[-2 * c3 + c4 + -1][c2 + -2 * c3 + -1] = B[-2 * c3 + c4 + -1][c2 + -2 * c3 + -1]; B[-2 * c3 + c4][c2 + -2 * 
c3] = 0.2 * (A[-2 * c3 + c4][c2 + -2 * c3] + A[-2 * c3 + c4][c2 + -2 * c3 - 1] + A[-2 * c3 + c4][1 + (c2 + -2 * c3)] + A[1 + (-2 * c3 + c4)][c2 + -2 * c3] + A[-2 * c3 + c4 - 1][c2 + -2 * c3]); } if (2 * c1 == 2 * c3 + n + -2) { if (n % 2 == 0) { A[n + -2][-2 * c1 + c2 + n + -3] = B[n + -2][-2 * c1 + c2 + n + -3]; } } } if (c0 >= (((16 * c1 + c2 + -15) * 16 < 0?-(-(16 * c1 + c2 + -15) / 16) : ((16 < 0?(-(16 * c1 + c2 + -15) + - 16 - 1) / - 16 : (16 * c1 + c2 + -15 + 16 - 1) / 16)))) && c1 >= (((c2 + 1) * 2 < 0?-(-(c2 + 1) / 2) : ((2 < 0?(-(c2 + 1) + - 2 - 1) / - 2 : (c2 + 1 + 2 - 1) / 2)))) && c2 <= 2 * tsteps + -1) { if ((c2 + 1) % 2 == 0) { for (c4 = 2 * c1; c4 <= ((2 * c1 + 1 < c2 + n + -3?2 * c1 + 1 : c2 + n + -3)); c4++) { B[-1 * c2 + c4 + 1][1] = 0.2 * (A[-1 * c2 + c4 + 1][1] + A[-1 * c2 + c4 + 1][1 - 1] + A[-1 * c2 + c4 + 1][1 + 1] + A[1 + (-1 * c2 + c4 + 1)][1] + A[-1 * c2 + c4 + 1 - 1][1]); } } } if (c0 >= (((9 * c1 + -7) * 8 < 0?-(-(9 * c1 + -7) / 8) : ((8 < 0?(-(9 * c1 + -7) + - 8 - 1) / - 8 : (9 * c1 + -7 + 8 - 1) / 8)))) && c1 <= (((((c2 + -1) * 2 < 0?((2 < 0?-((-(c2 + -1) + 2 + 1) / 2) : -((-(c2 + -1) + 2 - 1) / 2))) : (c2 + -1) / 2)) < tsteps + -1?(((c2 + -1) * 2 < 0?((2 < 0?-((-(c2 + -1) + 2 + 1) / 2) : -((-(c2 + -1) + 2 - 1) / 2))) : (c2 + -1) / 2)) : tsteps + -1))) { B[1][-2 * c1 + c2] = 0.2 * (A[1][-2 * c1 + c2] + A[1][-2 * c1 + c2 - 1] + A[1][1 + (-2 * c1 + c2)] + A[1 + 1][-2 * c1 + c2] + A[1 - 1][-2 * c1 + c2]); } } } } } } #pragma endscop } #if 1 int main() { return 0; } #else int main(int argc,char **argv) { /* Retrieve problem size. */ int n = 16; int tsteps = 2; /* Variable declaration/allocation. */ double (*A)[16 + 0][16 + 0]; A = ((double (*)[16 + 0][16 + 0])(polybench_alloc_data(((16 + 0) * (16 + 0)),(sizeof(double ))))); ; double (*B)[16 + 0][16 + 0]; B = ((double (*)[16 + 0][16 + 0])(polybench_alloc_data(((16 + 0) * (16 + 0)),(sizeof(double ))))); ; /* Initialize array(s). */ init_array(n, *A, *B); /* Start timer. */ ; /* Run kernel. 
*/ kernel_jacobi_2d_imper(tsteps,n, *A, *B); /* Stop and print timer. */ ; ; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ if (argc > 42 && !strcmp(argv[0],"")) { print_array(n, *A); } /* Be clean. */ free(((void *)A)); ; free(((void *)B)); ; return 0; } #endif
main.c
/*BHEADER****************************************************************
 * (c) 2007   The Regents of the University of California               *
 *                                                                      *
 * See the file COPYRIGHT_and_DISCLAIMER for a complete copyright       *
 * notice and disclaimer.                                               *
 *                                                                      *
 *EHEADER****************************************************************/

//--------------
//  A micro kernel
//--------------
// CORAL AMGmk benchmark driver: times three hypre sequential kernels
// (sparse matrix-vector product, relaxation, and vector axpy) over
// testIter repetitions each, and reports wall-clock time per kernel.
// NOTE(review): fabs() is used below without a visible <math.h> include;
// presumably it comes in via headers.h — confirm.
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"

#include "headers.h"

//
const int testIter = 500;        // repetitions per kernel (timed loop count)
double totalWallTime = 0.0;      // accumulated wall time for the current kernel

//
void test_Matvec();
void test_Relax();
void test_Axpy();

//
int main(int argc, char *argv[])
{
  double t0 = 0.0,
         t1 = 0.0,
         del_wtime = 0.0;
  int  max_num_threads;

  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// CORAL AMGmk Benchmark Version 1.0 \n");
  printf("// \n");
  printf("//------------ \n");

  // query the OpenMP thread count once, from the master thread of a
  // parallel region (omp_get_num_threads() returns 1 outside one)
#pragma omp parallel
#pragma omp master
  max_num_threads = omp_get_num_threads();

  printf("\nmax_num_threads = %d \n\n",max_num_threads );

  printf("\n testIter = %d \n\n", testIter );

  t0 = omp_get_wtime();

  // Matvec: time testIter sparse matrix-vector products
  totalWallTime = 0.0;

  test_Matvec();

  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// MATVEC\n");
  printf("// \n");
  printf("//------------ \n");

  printf("\nWall time = %f seconds. \n", totalWallTime);

  // Relax: time testIter relaxation sweeps
  totalWallTime = 0.0;

  test_Relax();

  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// Relax\n");
  printf("// \n");
  printf("//------------ \n");

  printf("\nWall time = %f seconds. \n", totalWallTime);

  // Axpy: time testIter y = alpha*x + y updates
  totalWallTime = 0.0;

  test_Axpy();

  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// Axpy\n");
  printf("// \n");
  printf("//------------ \n");

  printf("\nWall time = %f seconds. \n", totalWallTime);

  t1 = omp_get_wtime();;

  del_wtime = t1 - t0;

  printf("\nTotal Wall time = %f seconds. \n", del_wtime);

  return 0;
}

// Builds a 50x50x50 7-point Laplacian, runs testIter matvecs y = A*x
// with x = 1, and checks y against the reference solution vector sol
// returned by the generator.  Adds the timed loop's wall time to
// totalWallTime; prints the max abs error only if it is nonzero.
void test_Matvec()
{
  double t0 = 0.0, t1 = 0.0;

  hypre_CSRMatrix *A;
  hypre_Vector *x, *y, *sol;
  int nx, ny, nz, i;
  double *values;
  double *y_data, *sol_data;
  double error, diff;

  nx = 50;  /* size per proc nx*ny*nz */
  ny = 50;
  nz = 50;

  // stencil coefficients: diagonal 6, off-diagonals -1 in each direction
  values = hypre_CTAlloc(double, 4);
  values[0] = 6;
  values[1] = -1;
  values[2] = -1;
  values[3] = -1;

  A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);

  hypre_SeqVectorSetConstantValues(x,1);
  hypre_SeqVectorSetConstantValues(y,0);

  t0 = omp_get_wtime();
  for (i=0; i<testIter; ++i)
      hypre_CSRMatrixMatvec(1,A,x,0,y);   // y = 1*A*x + 0*y
  t1 = omp_get_wtime() ;
  totalWallTime += t1 - t0;

  // verify: y must match sol exactly (max abs difference reported)
  y_data = hypre_VectorData(y);
  sol_data = hypre_VectorData(sol);

  error = 0;
  for (i=0; i < nx*ny*nz; i++)
  {
      diff = fabs(y_data[i]-sol_data[i]);
      if (diff > error) error = diff;
  }

  if (error > 0) printf(" \n Matvec: error: %e\n", error);

  hypre_TFree(values);
  hypre_CSRMatrixDestroy(A);
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
  hypre_SeqVectorDestroy(sol);
}

// Same Laplacian setup as test_Matvec, but times testIter sequential
// relaxation sweeps on A x = sol starting from x = 1, then checks that
// x converged to the all-ones vector (the exact solution — presumably,
// given the generator's sol; verify against GenerateSeqLaplacian).
void test_Relax()
{
  double t0 = 0.0, t1 = 0.0;

  hypre_CSRMatrix *A;
  hypre_Vector *x, *y, *sol;
  int nx, ny, nz, i;
  double *values;
  double *x_data;
  double diff, error;

  nx = 50;  /* size per proc nx*ny*nz */
  ny = 50;
  nz = 50;

  values = hypre_CTAlloc(double, 4);
  values[0] = 6;
  values[1] = -1;
  values[2] = -1;
  values[3] = -1;

  A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);

  hypre_SeqVectorSetConstantValues(x,1);

  t0 = omp_get_wtime();
  for (i=0; i<testIter; ++i)
      hypre_BoomerAMGSeqRelax(A, sol, x);
  t1 = omp_get_wtime();

  totalWallTime += t1 - t0;

  // verify: every component of x should equal 1
  x_data = hypre_VectorData(x);

  error = 0;
  for (i=0; i < nx*ny*nz; i++)
  {
      diff = fabs(x_data[i]-1);
      if (diff > error) error = diff;
  }

  if (error > 0) printf(" \n Relax: error: %e\n", error);

  hypre_TFree(values);
  hypre_CSRMatrixDestroy(A);
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
  hypre_SeqVectorDestroy(sol);
}

// Times testIter axpy updates y += 0.5*x on length-125000 vectors of
// ones, then checks y == 1 + 0.5*testIter componentwise.
void test_Axpy()
{
  double t0 = 0.0, t1 = 0.0;

  hypre_Vector *x, *y;
  int nx, i;
  double alpha=0.5;
  double diff, error;
  double *y_data;

  nx = 125000;  /* size per proc  */

  x = hypre_SeqVectorCreate(nx);
  y = hypre_SeqVectorCreate(nx);

  hypre_SeqVectorInitialize(x);
  hypre_SeqVectorInitialize(y);

  hypre_SeqVectorSetConstantValues(x,1);
  hypre_SeqVectorSetConstantValues(y,1);

  t0 = omp_get_wtime();
  for (i=0; i<testIter; ++i)
      hypre_SeqVectorAxpy(alpha,x,y);   // y = alpha*x + y
  t1 = omp_get_wtime();

  // verify: after testIter updates, y[i] = 1 + alpha*testIter
  y_data = hypre_VectorData(y);

  error = 0;
  for (i=0; i < nx; i++)
  {
      diff = fabs(y_data[i]-1-0.5*(double)testIter);
      if (diff > error) error = diff;
  }

  if (error > 0) printf(" \n Axpy: error: %e\n", error);

  totalWallTime += t1 - t0;

  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
}
omp_sum_strnum_tls2.c
/* vim: set ts=4 sw=4: */
/* Filename    : sum_strnum_tls.c
 * Description : specified-data key (pthread TLS)
 * Author      : SunYoung Kim <sunyzero@gmail.com>
 * Notes       : Demonstration: sum_strnum() returns a pointer into a
 *               threadprivate static buffer, so two tasks that run on the
 *               same OpenMP thread share (and overwrite) the same buffer.
 *               The printed %p addresses make this visible; the sleeps and
 *               taskyields exist only to force interesting interleavings.
 */
#define _XOPEN_SOURCE 600
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#include "stdalsp.h"
#define NUM_THREADS	3
#define LEN_SUM_STR	16

char *sum_strnum(const char *, const char *);

int main()
{
	// one thread creates all tasks; the team executes them concurrently
#pragma omp parallel
#pragma omp single
	{
#pragma omp task
		{
			char *x = "1", *y = "3";
			char *ret_str = sum_strnum(x, y);
			// delay thread 0 so another task can reuse its buffer first
			if (omp_get_thread_num() == 0) usleep(500000);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "4", *y = "4";
			char *ret_str = sum_strnum(x, y);
			// taskyield: allow the runtime to schedule another task here
#pragma omp taskyield
			sleep(1);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "1", *y = "5";
			char *ret_str = sum_strnum(x, y);
#pragma omp taskyield
			sleep(2);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "2", *y = "5";
			char *ret_str = sum_strnum(x, y);
#pragma omp taskyield
			if (omp_get_thread_num() == 0) usleep(500000);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "2", *y = "1";
			char *ret_str = sum_strnum(x, y);
#pragma omp taskyield
			if (omp_get_thread_num() == 0) usleep(500000);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "5", *y = "4";
			char *ret_str = sum_strnum(x, y);
#pragma omp taskyield
			if (omp_get_thread_num() == 0) usleep(500000);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
#pragma omp task
		{
			char *x = "7", *y = "8";
			char *ret_str = sum_strnum(x, y);
#pragma omp taskyield
			if (omp_get_thread_num() == 0) usleep(500000);
			printf("%s + %s = %s (%p)\n", x, y, ret_str, ret_str);
		}
	} /* omp parallel, sections */
	return EXIT_SUCCESS;
}

/* apply pthread specific key */
/* Formats atoi(s1)+atoi(s2) into a per-thread static buffer and returns
 * a pointer to it.  The buffer is threadprivate, so the result is only
 * valid until the SAME thread calls sum_strnum again — tasks that resume
 * after a yield/sleep may print a value overwritten by a later call on
 * the same thread (that hazard is the point of this demo). */
char *sum_strnum(const char *s1, const char *s2)
{
	static char tls_str[LEN_SUM_STR];
	// one copy of tls_str per OpenMP thread (TLS), not one per task
#pragma omp threadprivate(tls_str)
	snprintf(tls_str, LEN_SUM_STR, "%d", atoi(s1) + atoi(s2));
	return tls_str;
}
GB_unop_transpose.c
//------------------------------------------------------------------------------ // GB_unop_transpose: C=op(cast(A')), transpose, typecast, and apply op //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // This method is parallel, but not highly scalable. It uses only naslice = // nnz(A)/(A->vlen) threads. Each thread requires O(vlen) workspace. { // Ax unused for some uses of this template #include "GB_unused.h" //-------------------------------------------------------------------------- // get A and C //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; #if defined ( GB_PHASE_2_OF_2 ) const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ; int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // C = op (cast (A')) //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(naslice) schedule(static) for (taskid = 0 ; taskid < naslice ; taskid++) { // get the rowcount for this slice, of size A->vlen int64_t *GB_RESTRICT rowcount = Rowcounts [taskid] ; for (int64_t Iter_k = A_slice [taskid] ; Iter_k < A_slice [taskid+1] ; Iter_k++) { GBI_jth_iteration_with_iter (Iter, j, pA, pA_end) ; for ( ; pA < pA_end ; pA++) { #if defined ( GB_PHASE_1_OF_2) // count one more entry in C(i,:) for this slice rowcount [Ai [pA]]++ ; #else // insert the entry into C(i,:) for this slice int64_t pC = rowcount [Ai [pA]]++ ; Ci [pC] = j ; // Cx [pC] = op (Ax [pA]) GB_CAST_OP (pC, pA) ; #endif } } } }
GB_binop__iseq_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_fp64)
// A*D function (colscale):         GB (_AxD__iseq_fp64)
// D*A function (rowscale):         GB (_DxB__iseq_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_fp64)
// C=scalar+B                       GB (_bind1st__iseq_fp64)
// C=scalar+B'                     GB (_bind1st_tran__iseq_fp64)
// C=A+scalar                       GB (_bind2nd__iseq_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij == bij)

// These macros parameterize the shared #include'd templates below.

#define GB_ATYPE double
#define GB_BTYPE double
#define GB_CTYPE double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: ISEQ returns 1.0 or 0.0 as a double, not a bool
#define GB_BINOP(z, x, y, i, j) z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP 0

// op is second
#define GB_OP_IS_SECOND 0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE (GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISEQ is not in that list, so this kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__iseq_fp64)
(
    GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_fp64)
(
    GrB_Matrix C, const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_fp64)
(
    GrB_Matrix C, const GB_void *p_bwork, const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return above always fires first
    // (harmless artifact of the code generator).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_fp64)
(
    GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_fp64)
(
    GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern, int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_fp64)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__iseq_fp64)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISEQ, which is commutative.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_fp64)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap, else NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap, else NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    double aij = Ax [pA] ;         \
    Cx [pC] = (x == aij) ;         \
}

GrB_Info GB (_bind1st_tran__iseq_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template use
    #undef GB_ATYPE
    #define GB_ATYPE double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    double aij = Ax [pA] ;         \
    Cx [pC] = (aij == y) ;         \
}

GrB_Info GB (_bind2nd_tran__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__times_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__times_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__times_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_fp32)
// A*D function (colscale):         GB (_AxD__times_fp32)
// D*A function (rowscale):         GB (_DxB__times_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_fp32)
// C=scalar+B                       GB (_bind1st__times_fp32)
// C=scalar+B'                     GB (_bind1st_tran__times_fp32)
// C=A+scalar                       GB (_bind2nd__times_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__times_fp32)

// C type:     float
// A type:     float
// A pattern?  0
// B type:     float
// B pattern?  0

// BinaryOp:   cij = (aij * bij)

// These macros parameterize the shared #include'd templates below.
// GBX handles iso-valued matrices (all entries share one value).

#define GB_ATYPE float
#define GB_BTYPE float
#define GB_CTYPE float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN 0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN 0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) z = (x * y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP 0

// op is second
#define GB_OP_IS_SECOND 0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE (GxB_NO_TIMES || GxB_NO_FP32 || GxB_NO_TIMES_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// TIMES qualifies, so this kernel is enabled (unlike non-arithmetic ops).
void GB (_Cdense_ewise3_accum__times_fp32)
(
    GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__times_fp32)
(
    GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_fp32)
(
    GrB_Matrix C, const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_fp32)
(
    GrB_Matrix C, const GB_void *p_bwork, const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return above always fires first
    // (harmless artifact of the code generator).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_fp32)
(
    GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_fp32)
(
    GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_fp32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace, freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are the fill-in values for GxB_eWiseUnion; they are
        // only defined (and only read by the template) in that case
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__times_fp32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for TIMES, which is commutative.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_fp32)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap, else NULL
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap, else NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    float aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = (x * aij) ;                  \
}

GrB_Info GB (_bind1st_tran__times_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template use
    #undef GB_ATYPE
    #define GB_ATYPE float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    float aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = (aij * y) ;                  \
}

GrB_Info GB (_bind2nd_tran__times_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(32*t3+Nx+28,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),128*t4+126),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ 
(-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__abs_uint16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint16_int32
// op(A') function:  GB_tran__abs_uint16_int32

// C type:   uint16_t
// A type:   int32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

// NOTE(review): the ABS operator body is the identity (cij = aij) because the
// int32_t input is first cast to the unsigned result type uint16_t, and the
// absolute value of an unsigned value is itself.

// type of the A matrix entries
#define GB_ATYPE \
    int32_t

// type of the C matrix entries
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the p-th entry of the C value array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij)): the full load / cast / apply / store sequence
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to all anz entries of Ax, writing the
// result into Cx, parallelized with a static OpenMP schedule.  Returns
// GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE).
GrB_Info GB_unop__abs_uint16_int32
(
    uint16_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in the included template
// GB_unaryop_transpose.c, which expands using the macros defined above.
GrB_Info GB_tran__abs_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
draw-private.h
/*
  Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization
  dedicated to making software imaging solutions freely available.

  You may not use this file except in compliance with the License.  You may
  obtain a copy of the License at

    http://www.imagemagick.org/script/license.php

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.

  MagickCore private image drawing methods.
*/
#ifndef _MAGICKCORE_DRAW_PRIVATE_H
#define _MAGICKCORE_DRAW_PRIVATE_H

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#include "magick/cache.h"
#include "magick/image.h"
#include "magick/memory_.h"

/*
  GetFillColor() stores in *pixel the fill color to use at image position
  (x,y).  With no fill pattern, the DrawInfo's solid fill color is returned
  and the call always succeeds (MagickTrue).  With a pattern, the pattern
  image is sampled at (x,y) offset by the pattern's tile offset, using tiled
  virtual-pixel addressing so the pattern repeats across the plane.

  The `#pragma omp critical` guards only the GetOneVirtualMethodPixel()
  statement that immediately follows it -- NOTE(review): presumably because
  concurrent virtual-pixel reads of the shared pattern image's cache are not
  thread-safe; confirm before restructuring this code.
*/
static inline MagickBooleanType GetFillColor(const DrawInfo *draw_info,
  const ssize_t x,const ssize_t y,PixelPacket *pixel)
{
  Image
    *pattern;

  MagickBooleanType
    status;

  pattern=draw_info->fill_pattern;
  if (pattern == (Image *) NULL)
    {
      /* no pattern: solid fill color */
      *pixel=draw_info->fill;
      return(MagickTrue);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT) && (_OPENMP >= 200203)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(pattern,TileVirtualPixelMethod,
    x+pattern->tile_offset.x,y+pattern->tile_offset.y,pixel,
    &pattern->exception);
  if (pattern->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;  /* pattern has no alpha channel: opaque */
  return(status);
}

/*
  GetStrokeColor() is the stroke-color twin of GetFillColor(): it stores in
  *pixel the stroke color to use at image position (x,y), either the solid
  stroke color or a sample of the stroke pattern image with tiled
  virtual-pixel addressing.  The same OpenMP critical-section caveat as in
  GetFillColor() applies to the GetOneVirtualMethodPixel() call.
*/
static inline MagickBooleanType GetStrokeColor(const DrawInfo *draw_info,
  const ssize_t x,const ssize_t y,PixelPacket *pixel)
{
  Image
    *pattern;

  MagickBooleanType
    status;

  pattern=draw_info->stroke_pattern;
  if (pattern == (Image *) NULL)
    {
      /* no pattern: solid stroke color */
      *pixel=draw_info->stroke;
      return(MagickTrue);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT) && (_OPENMP >= 200203)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(pattern,TileVirtualPixelMethod,
    x+pattern->tile_offset.x,y+pattern->tile_offset.y,pixel,
    &pattern->exception);
  if (pattern->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;  /* pattern has no alpha channel: opaque */
  return(status);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

#endif
lastprivate-conditional-5.c
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp -fdump-tree-ompexp" } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_start " 3 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_end_nowait " 3 "ompexp" } } */

/* A lastprivate(conditional:) clause requires the runtime to track which
   logical iteration last assigned r, so the ompexp pass must expand all
   three worksharing loops below -- regardless of the schedule requested --
   through the GOMP_loop_start / GOMP_loop_end_nowait library entry points
   instead of inline schedule code.  The scan-tree-dump-times directives
   above verify exactly three occurrences of each call.  */

int r;

/* default schedule */
void
foo (int *a)
{
  #pragma omp for nowait lastprivate(conditional: r)
  for (int i = 0; i < 64; ++i)
    if (a[i])
      r = a[i];
}

/* static chunked schedule */
void
bar (int *a)
{
  #pragma omp for nowait lastprivate(conditional: r) schedule (static, 4)
  for (int i = 0; i < 64; ++i)
    if (a[i])
      r = a[i];
}

/* runtime-selected schedule */
void
baz (int *a)
{
  #pragma omp for nowait lastprivate(conditional: r) schedule (runtime)
  for (int i = 0; i < 64; ++i)
    if (a[i])
      r = a[i];
}
GB_binop__rdiv_fc64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_08__rdiv_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_04__rdiv_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale):         GB (_AxD__rdiv_fc64)
// D*A function (rowscale):         GB (_DxB__rdiv_fc64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B                       GB (_bind1st__rdiv_fc64)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar                       GB (_bind2nd__rdiv_fc64)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_fc64)

// C type:     GxB_FC64_t
// A type:     GxB_FC64_t
// A pattern?  0
// B type:     GxB_FC64_t
// B pattern?  0

// BinaryOp:   cij = GB_FC64_div (bij, aij)

// NOTE: RDIV is "reverse divide": rdiv(x,y) = y/x, so GB_BINOP below swaps
// its operands when calling the complex division helper GB_FC64_div.

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (operands intentionally swapped: rdiv(x,y) = y/x)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC64_div (y, x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; no disable check is needed
// because the template is only reached for the ops listed above.
void GB (_Cdense_ewise3_accum__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return below -- harmless artifact
    // of the code generator; kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// The alpha/beta scalars are only read when is_eWiseUnion is true; the add
// template handles all mask / sparsity combinations.
GrB_Info GB (_AaddB__rdiv_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC64_t alpha_scalar ;
    GxB_FC64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // This is the case for rdiv (GB_BINOP_FLIP is 0 above).
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x for every entry present in
// B (Bb is B->b when B is bitmap; GBB is true for all p otherwise).
GrB_Info GB (_bind1st__rdiv_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_div (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = rdiv (Ax [p], y) = y / Ax [p] for every entry present in A.
GrB_Info GB (_bind2nd__rdiv_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_div (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// (rdiv with x bound first: result is aij / x)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_div (aij, x) ;                \
}

GrB_Info GB (_bind1st_tran__rdiv_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// (rdiv with y bound second: result is y / aij)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_div (y, aij) ;                \
}

GrB_Info GB (_bind2nd_tran__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB031-truedepfirstdimension-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* There is a loop-carried true dependence within the outer level loop. 
Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15
*/
#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel DRB031 (Cetus-transformed output).
 *
 * IMPORTANT: the race documented in the header above is INTENTIONAL -- this
 * file is a "-yes" benchmark whose purpose is to exercise data-race
 * detectors on the loop-carried true dependence b[i][j] <- b[i-1][j-1].
 * Do not "fix" the parallelization.
 *
 * NOTE(review): double b[1000][1000] is ~7.6 MiB of automatic storage, close
 * to the common 8 MiB default stack limit -- confirm the harness raises the
 * stack limit if this is run standalone.
 */
int main(int argc, char * argv[])
{
	int i, j;
	int n = 1000, m = 1000;
	double b[1000][1000];
	int _ret_val_0;
	/* First nest: initialize b; fully independent iterations, so both
	 * levels carry Cetus-inserted parallelization pragmas. */
	#pragma cetus private(i, j) 
	#pragma loop name main#0 
	#pragma cetus parallel 
	#pragma omp parallel for private(i, j)
	for (i=0; i<n; i ++ )
	{
		#pragma cetus private(j) 
		#pragma loop name main#0#0 
		#pragma cetus parallel 
		#pragma omp parallel for private(j)
		for (j=0; j<m; j ++ )
		{
			b[i][j]=0.5;
		}
	}
	/* Second nest: b[i][j] = b[i-1][j-1] carries a true dependence on the
	 * outer (i) dimension, which is why the outer loop has no parallel
	 * pragma here; the header's stated race pair refers to the reads and
	 * writes of this statement under the benchmark's parallelization. */
	#pragma cetus private(i, j) 
	#pragma loop name main#1 
	for (i=1; i<n; i ++ )
	{
		#pragma cetus private(j) 
		#pragma loop name main#1#0 
		#pragma cetus parallel 
		#pragma omp parallel for private(j)
		for (j=1; j<m; j ++ )
		{
			b[i][j]=b[i-1][j-1];
		}
	}
	printf("b[500][500]=%f\n", b[500][500]);
	_ret_val_0=0;
	return _ret_val_0;
}
ilqr.h
#ifndef INC_2019_ILQR_H
#define INC_2019_ILQR_H

#include <vector>
#include <chrono>
#include <iostream>

#include "extern/eigen/Eigen/Dense"

/*
 * Convenience definitions
 */

template<int M, int N>
using mat = Eigen::Matrix<double, M, N>;

template<int N>
using vec = Eigen::Matrix<double, N, 1>;

/*
 * Straightforward templated iLQR implementation
 *
 * Template arguments:
 * X: number of dimensions of state space
 * U: number of dimensions of action space
 * T: horizon (number of discrete time steps)
 *
 * Subclasses implement dynamics() and costs(); update() then performs one
 * (possibly multi-iteration) iLQR solve around the current trajectory.
 */
template<int X, int U>
struct ILQR {

    /*
     * The following variable names may differ from the usual naming scheme to
     * better align with the symbols used in the reference paper "Synthesis and
     * Stabilization of Complex Behaviors through Online Trajectory Optimization"
     */

    // current state sequence
    std::vector<vec<X>> x = {vec<X>::Zero()};
    // current action sequence
    std::vector<vec<U>> u;
    // previous state sequence
    std::vector<vec<X>> prev_x;
    // previous action sequence
    std::vector<vec<U>> prev_u;

    // initial cost (-1.0 until the first rollout has run)
    double trajCosts = -1.0;

    // control limits (default-initialized to +/- infinity, i.e. unconstrained)
    vec<U> uMax = vec<U>::Zero();
    vec<U> uMin = vec<U>::Zero();

    // parts of the dynamics function jacobian wrt. state
    std::vector<mat<X, X>> fx;
    // parts of the dynamics function jacobian wrt. action
    std::vector<mat<X, U>> fu;

    // cost function gradient
    std::vector<vec<X + U>> l;
    // parts of the cost function gradient wrt. state
    std::vector<vec<X>> lx;
    // parts of the cost function gradient wrt. action
    std::vector<vec<U>> lu;
    // cost function hessian
    std::vector<mat<X + U, X + U>> L;
    // parts of the cost function hessian wrt. state, state
    std::vector<mat<X, X>> lxx;
    // parts of the cost function hessian wrt. action, action
    std::vector<mat<U, U>> luu;
    // parts of the cost function hessian wrt. action, state
    std::vector<mat<U, X>> lux;

    // gradient of value wrt. state
    vec<X> Vx;
    // hessian of value wrt. state, state
    mat<X, X> Vxx;

    // gradient of cost-to-go wrt. state
    vec<X> Qx;
    // gradient of cost-to-go wrt. action
    vec<U> Qu;
    // hessian of cost-to-go wrt. state, state
    mat<X, X> Qxx;
    // hessian of cost-to-go wrt. action, action
    mat<U, U> Quu;
    // hessian of cost-to-go wrt. action, state
    mat<U, X> Qux;

    // constant control components
    std::vector<vec<U>> k;
    // linear control components
    std::vector<mat<U, X>> K;

    // horizon
    size_t T = 20;
    // amount of iterations done in last update
    size_t iterations = 0;
    // maximum amount of iterations to improve trajectory
    size_t maxIterations = 5;
    // the wall clock time needed for one update
    double elapsedUpdateTime = 0;
    // whether to use the bfgs instead of finite diff. for costs
    bool useBfgs = false;

    // Start with unconstrained control limits.
    ILQR() {
        uMax = uMax.array() + INFINITY;
        uMin = uMin.array() - INFINITY;
    }

    // Discrete dynamics x_{t+1} = f(x_t, u_t, t); supplied by the subclass.
    virtual vec<X> dynamics(vec<X>, vec<U>, int) = 0;
    // Running cost l(x_t, u_t, t); supplied by the subclass.
    virtual double costs(vec<X>, vec<U>, int) = 0;

    /*
     * Helper functions for differentiation
     */

    // finite-difference step size
    double diffEps = 10.0e-4;

    // Jacobian of dynamics() wrt. [x; u] via central differences.
    virtual mat<X, X + U> jacobian(vec<X> x, vec<U> u, int t) {
        mat<X, X + U> jac = mat<X, X + U>::Zero();
        for (int i = 0; i < X; i += 1) {
            vec<X> h = vec<X>::Zero();
            h[i] = diffEps;
            vec<X> x0 = x + h;
            vec<X> x1 = x - h;
            jac.col(i) = (dynamics(x0, u, t) - dynamics(x1, u, t)) / (2.0 * diffEps);
        }
        for (int i = 0; i < U; i += 1) {
            vec<U> h = vec<U>::Zero();
            h[i] = diffEps;
            vec<U> u0 = u + h;
            vec<U> u1 = u - h;
            jac.col(X + i) = (dynamics(x, u0, t) - dynamics(x, u1, t)) / (2.0 * diffEps);
        }
        return jac;
    }

    // Gradient of costs() wrt. [x; u] via central differences.
    virtual vec<X + U> gradient(vec<X> x, vec<U> u, int t) {
        vec<X + U> grad = vec<X + U>::Zero();
        for (int i = 0; i < X; i += 1) {
            vec<X> h = vec<X>::Zero();
            h[i] = diffEps;
            vec<X> x0 = x + h;
            vec<X> x1 = x - h;
            grad[i] = (costs(x0, u, t) - costs(x1, u, t)) / (2.0 * diffEps);
        }
        for (int i = 0; i < U; i += 1) {
            vec<U> h = vec<U>::Zero();
            h[i] = diffEps;
            vec<U> u0 = u + h;
            vec<U> u1 = u - h;
            grad[X + i] = (costs(x, u0, t) - costs(x, u1, t)) / (2.0 * diffEps);
        }
        return grad;
    }

    // Hessian of costs() via one-sided (forward) differences of gradient();
    // `grad` is the gradient already evaluated at (x, u).
    virtual mat<X + U, X + U> hessian(
            vec<X> x,
            vec<U> u,
            int t,
            vec<X + U>& grad) {
        mat<X + U, X + U> hess = mat<X + U, X + U>::Zero();
        for (int i = 0; i < X; i += 1) {
            vec<X> h = vec<X>::Zero();
            h[i] = diffEps;
            vec<X> x0 = x + h;
            hess.col(i) = (gradient(x0, u, t) - grad) / diffEps;
        }
        for (int i = 0; i < U; i += 1) {
            vec<U> h = vec<U>::Zero();
            h[i] = diffEps;
            vec<U> u0 = u + h;
            hess.col(X + i) = (gradient(x, u0, t) - grad) / diffEps;
        }
        return hess;
    }

    // Quasi-Newton (BFGS-style) in-place update of `grad` and `hessian`
    // using the step from (prev_x, prev_u) to (x, u). Skipped (early return)
    // when the curvature denominator y'.s is exactly zero.
    virtual void bfgsUpdate(
            vec<X> x,
            vec<U> u,
            int t,
            vec<X> prev_x,
            vec<U> prev_u,
            vec<X + U>& grad,
            mat<X + U, X + U>& hessian) {
        vec<X + U> s;
        s << x - prev_x, u - prev_u;
        vec<X + U> newGrad = gradient(x, u, t);
        vec<X + U> y = newGrad - grad;
        grad = newGrad;
        double d = y.transpose() * s;
        if (d == 0) {
            return;
        }
        mat<X + U, X + U> firstMat = (y * y.transpose()) / d;
        vec<X + U> v = hessian * s;
        mat<X + U, X + U> secondMat = (v * v.transpose()) / (s.transpose() * v);
        hessian += firstMat - secondMat;
    }

    // One iLQR solve: rollout, then up to maxIterations of
    // (derivatives -> backward pass -> parallel linesearch), adapting the
    // regularization mu. Returns true if any iteration lowered the cost.
    // NOTE(review): assumes T >= 1; x/u/etc. are lazily resized to T here.
    bool update() {
        if (x.size() != T) {
            x.resize(T, vec<X>::Zero());
            u.resize(T, vec<U>::Zero());
            prev_x.resize(T, vec<X>::Zero());
            prev_u.resize(T, vec<U>::Zero());
            fx.resize(T, mat<X, X>::Zero());
            fu.resize(T, mat<X, U>::Zero());
            l.resize(T, vec<X + U>::Zero());
            lx.resize(T, vec<X>::Zero());
            lu.resize(T, vec<U>::Zero());
            L.resize(T, mat<X + U, X + U>::Zero());
            lxx.resize(T, mat<X, X>::Zero());
            luu.resize(T, mat<U, U>::Zero());
            lux.resize(T, mat<U, X>::Zero());
            k.resize(T, vec<U>::Zero());
            K.resize(T, mat<U, X>::Zero());
        }

        std::chrono::steady_clock::time_point begin =
            std::chrono::steady_clock::now();

        bool trajectoryChanged = true;
        bool improved = false;

        // Levenberg-Marquardt-style regularization state.
        double mu = 1.0;
        double minMu = 10e-6;
        double muDelta = 2;
        double minMuDelta = 2;

        // do intial trajectory rollout
        trajCosts = 0.0;
        for (size_t t = 0; t < T-1; t += 1) {
            x[t+1] = dynamics(x[t], u[t], t);
            trajCosts += costs(x[t], u[t], t);
        }
        trajCosts += costs(x[T-1], u[T-1], T-1);

        for (size_t s = 0; s < maxIterations; s += 1) {

            if (trajectoryChanged) {
                // recalculate derivatives around new trajectory
                // (each time step is independent, hence the parallel for)
                #pragma omp parallel for
                for (size_t t = 0; t < T; t += 1) {
                    mat<X, X + U> dynamicsJacobian = jacobian(x[t], u[t], t);
                    fx[t] = dynamicsJacobian.block(0, 0, X, X);
                    fu[t] = dynamicsJacobian.block(0, X, X, U);

                    if (s != 0 && useBfgs) {
                        bfgsUpdate(x[t], u[t], t, prev_x[t], prev_u[t], l[t], L[t]);
                    } else {
                        l[t] = gradient(x[t], u[t], t);
                        lx[t] = l[t].block(0, 0, X, 1);
                        lu[t] = l[t].block(X, 0, U, 1);
                        L[t] = hessian(x[t], u[t], t, l[t]);
                        lxx[t] = L[t].block(0, 0, X, X);
                        luu[t] = L[t].block(X, X, U, U);
                        lux[t] = L[t].block(X, 0, U, X);
                    }
                }
                trajectoryChanged = false;
            }

            // backward pass
            // initialize value components with costs of final state
            Vx = lx[T-1];
            Vxx = lxx[T-1];

            for (int t = T-1; t > -1; t -= 1) {
                // update cost-go-go components
                Qx = lx[t] + fx[t].transpose() * Vx;
                Qu = lu[t] + fu[t].transpose() * Vx;
                Qxx = lxx[t] + fx[t].transpose() * Vxx * fx[t];
                Quu = luu[t] + fu[t].transpose() * Vxx * fu[t];
                Qux = lux[t] + fu[t].transpose() * Vxx * fx[t];

                // regularized cost-to-go components
                // (mu added to Vxx's diagonal keeps modQuu invertible)
                mat<X, X> modVxx = Vxx.array()
                    + mat<X, X>::Identity().array() * mu;
                mat<U, U> modQuu = luu[t] + fu[t].transpose() * modVxx * fu[t];
                mat<U, X> modQux = lux[t] + fu[t].transpose() * modVxx * fx[t];

                // compute control components
                mat<U, U> H = -modQuu.inverse();
                k[t] = H * Qu;
                K[t] = H * modQux;

                vec<U> c = u[t] + k[t];

                // apply control limits (clamp the feedforward term and zero
                // the feedback row for saturated control dimensions)
                for (size_t d = 0; d < U; d += 1) {
                    if (c[d] > uMax[d]) {
                        k[t][d] = uMax[d] - u[t][d];
                        K[t].row(d) = mat<1, X>::Zero();
                    }
                    if (c[d] < uMin[d]) {
                        k[t][d] = uMin[d] - u[t][d];
                        K[t].row(d) = mat<1, X>::Zero();
                    }
                }

                // update value components, using improved value update
                Vx = Qx
                    + K[t].transpose() * Quu * k[t]
                    + K[t].transpose() * Qu
                    + Qux.transpose() * k[t];
                Vxx = Qxx
                    + K[t].transpose() * Quu * K[t]
                    + K[t].transpose() * Qux
                    + Qux.transpose() * K[t];
                // re-symmetrize to counter floating-point drift
                Vxx = 0.5 * (Vxx.transpose() + Vxx);
            }

            // simple parallel linesearch, prevents overshooting
            double alphaSteps[6] = {1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125};

            #pragma omp parallel for
            for (int i = 0; i < 6; ++i) {
                double alpha = alphaSteps[i];
                std::vector<vec<X>> states(T);
                states[0] = x[0];
                std::vector<vec<U>> actions(T);
                double newCosts = 0.0;

                for (size_t t = 0; t < T-1; t += 1) {
                    actions[t] = u[t] + K[t] * (states[t] - x[t]) + k[t] * alpha;
                    states[t+1] = dynamics(states[t], actions[t], t);
                    newCosts += costs(states[t], actions[t], t);
                }
                actions[T-1] = u[T-1]
                    + K[T-1] * (states[T-1] - x[T-1])
                    + k[T-1] * alpha;
                newCosts += costs(states[T-1], actions[T-1], T-1);

                // the critical section guards both the comparison against and
                // the update of the shared trajectory/cost state
                #pragma omp critical
                if (newCosts < trajCosts) {
                    prev_x = x;
                    prev_u = u;
                    x = states;
                    u = actions;
                    trajCosts = newCosts;
                    trajectoryChanged = true;
                    improved = true;
                }
            }

            if (trajectoryChanged) {
                // linesearch found alpha that allows improvement

                // very stupid, but idk how to do it elegantly in eigen
                double gradNorm = 0.0;
                for (size_t i = 0; i < u.size(); ++i) {
                    gradNorm += (k[i].array().abs() / u[i].array().abs()).mean();
                }
                gradNorm /= u.size();

                // check if gradient is small enougth
                if (s > 0 && gradNorm < 10e-6 && mu < 10e-5) {
                    iterations = s + 1;
                    break;
                }

                // decrease regularization
                muDelta = std::min(1.0/minMuDelta, muDelta/minMuDelta);
                if (mu * muDelta > minMu) {
                    mu = mu * muDelta;
                } else if (mu * muDelta <= minMu) {
                    mu = 0.0;
                }
            } else {
                // no improvement possible for any alpha
                // -> increase regularization; give up once mu explodes
                muDelta = std::max(minMuDelta, muDelta * minMuDelta);
                mu = std::max(minMu, mu * muDelta);
                if (mu > 10e7) {
                    iterations = s + 1;
                    break;
                }
            }

            iterations = s + 1;
        }

        // measure time for statistics
        std::chrono::steady_clock::time_point end =
            std::chrono::steady_clock::now();
        elapsedUpdateTime = std::chrono::duration_cast<
            std::chrono::milliseconds>(end - begin).count();

        return improved;
    }
};

#endif
omp_for_ordered.c
<ompts:test>
<ompts:testdescription>Test which checks the omp ordered directive by counting up an variable in an parallelized loop and watching each iteration if the sumand is larger as the last one.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp for ordered</ompts:directive>
<ompts:dependences>omp critical,omp for schedule</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

/* shared across calls; reset at the start of each test run */
static int last_i = 0;

/* Utility function to check that i is increasing monotonically with each call */
static int check_i_islarger (int i)
{
    int islarger;
    islarger = (i > last_i);
    last_i = i;
    return (islarger);
}

/* Validation-suite test for "#pragma omp ordered": inside the ordered
 * region, iterations must execute in sequential loop order, so each thread
 * must observe a strictly increasing i and the unsynchronized-looking
 * sum += i must still be race-free. The <ompts:...> tags are generator
 * markup consumed by the OpenMP validation suite, not C code. */
int <ompts:testcode:functionname>omp_for_ordered</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    int sum;
    int is_larger = 1;
    </ompts:orphan:vars>
    int known_sum;

    last_i = 0;
    sum = 0;

#pragma omp parallel
    {
        <ompts:orphan>
        int i;
        int my_islarger = 1;
        /* schedule(static,1) spreads consecutive iterations across threads,
         * so only the ordered clause can restore sequential order */
#pragma omp for schedule(static,1) ordered
        for (i = 1; i < 100; i++)
        {
            <ompts:check>#pragma omp ordered</ompts:check>
            {
                my_islarger = check_i_islarger(i) && my_islarger;
                sum = sum + i;
            } /* end of ordered */
        } /* end of for */
        /* combine each thread's monotonicity verdict */
#pragma omp critical
        {
            is_larger = is_larger && my_islarger;
        } /* end of critical */
        </ompts:orphan>
    }
    /* expected value: 1 + 2 + ... + 99 */
    known_sum=(99 * 100) / 2;
    return ((known_sum == sum) && is_larger);
}
</ompts:testcode>
</ompts:test>
irbuilder_unroll_partial_heuristic_constant_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics // REQUIRES: x86-registered-target // TODO: The unroll-factor heuristic might be able to use the information that the trip count is constant, but currently is not able to determine that. #ifndef HEADER #define HEADER double sind(double); // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_constant_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8 // CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* 
%[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 4 // CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 4 // CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0 // CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1) // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]] // CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi 
i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]] // CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 4 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 4, %[[TMP12]] // CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP19:.+]] = load float, 
float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP19]] to double // CHECK-NEXT: %[[CALL:.+]] = call double @sind(double noundef %[[CONV]]) // CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP22]] to double // CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]] // CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP24]] to i64 // CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM5]] // CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX6]], align 4 // CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP25]] to double // CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]] // CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[E_ADDR]], align 8 // CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP27]] to i64 // CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM9]] // CHECK-NEXT: %[[TMP28:.+]] = load float, float* %[[ARRAYIDX10]], align 4 // CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP28]] to double // CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]] // CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[OFFSET_ADDR]], align 4 // CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP29]] to double // CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]] // CHECK-NEXT: %[[TMP30:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP31:.+]] = load i32, i32* 
%[[I]], align 4 // CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP31]] to i64 // CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP30]], i64 %[[IDXPROM14]] // CHECK-NEXT: %[[TMP32:.+]] = load float, float* %[[ARRAYIDX15]], align 4 // CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP32]] to double // CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]] // CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float // CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4 // CHECK-NEXT: br label %[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *d, float *e, float offset) { #pragma omp for #pragma omp unroll partial for (int i = 0; i < 128; i++) { a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset; } } #endif // HEADER // CHECK-LABEL: 
define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 128, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // 
CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
GB_unop__asinh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__asinh_fc64_fc64
// op(A') function:  GB_unop_tran__asinh_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = casinh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex inverse hyperbolic sine from C99 <complex.h>)
#define GB_OP(z, x) \
    z = casinh (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = casinh (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise Cx [p] = casinh (Ax [p]); safe when Cx aliases Ax because each
// entry is read once before its slot is written.
GrB_Info GB_unop_apply__asinh_fc64_fc64
(
    GxB_FC64_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = casinh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = casinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in the shared template
// GB_unop_transpose.c, which is specialized here via the GB_* macros above.
GrB_Info GB_unop_tran__asinh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
threading.h
#ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/utils/openmp_wrapper.h> #include <vector> #include <functional> namespace LightGBM { class Threading { public: template<typename INDEX_T> static inline void For(INDEX_T start, INDEX_T end, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) { int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } INDEX_T num_inner = (end - start + num_threads - 1) / num_threads; if (num_inner <= 0) { num_inner = 1; } OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = inner_start + num_inner; if (inner_end > end) { inner_end = end; } if (inner_start < end) { inner_fun(i, inner_start, inner_end); } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } }; } // namespace LightGBM #endif // LightGBM_UTILS_THREADING_H_
ac3d_openmp.c
#include<stdio.h>
#include"3d_lib.c"

// One time step of a 3-D acoustic staggered-grid finite-difference update,
// parallelized with OpenMP. Updates the three particle-velocity components
// (vx, vy, vz) from the pressure field tpp, synchronizes, then updates tpp
// from the fresh velocities. The p*b* arrays and bhalf/ahalf/bfull/afull
// appear to be CPML absorbing-boundary memory variables and coefficients,
// and ext the absorbing-layer width — TODO confirm against 3d_lib.c.
// Array layout: index = i + j*BD_nz + k*BD_nz*BD_nx (z fastest, then x, y).
void ac3d_openmp(double *vx, int BD_nx_vx, int BD_ny_vx, int BD_nz_vx, double *pvxbtpp,
                 double *vy, int BD_nx_vy, int BD_ny_vy, int BD_nz_vy, double *pvybtpp,
                 double *vz, int BD_nx_vz, int BD_ny_vz, int BD_nz_vz, double *pvzbtpp,
                 double *tpp, double *ptppbvx, double *ptppbvy, double *ptppbvz,
                 int BD_nx_tpp, int BD_ny_tpp, int BD_nz_tpp,
                 double *rho, double *lambda, double *fdc,
                 double dt, double dx, double dy, double dz, int ext,
                 double *bhalf, double *ahalf, double *bfull, double *afull)
{
  int i,j,k;
  //********************* V_X *********************//
  #pragma omp parallel
  {
    // vxbtxx: vx update driven by d(tpp)/dx; the 2.0/(rho+rho) factors are
    // the inverse density averaged between the two cells straddling the
    // staggered vx location.
    // nowait: the three velocity loops touch disjoint arrays, so threads may
    // flow from one to the next; the explicit barrier below orders them
    // before the pressure update reads them.
    #pragma omp for collapse(2) private(j) nowait
    for(k=0; k<BD_ny_vx; k++) {
      for(i=0; i<BD_nz_vx; i++) {
        // absorbing boundary strips at both x edges (handled pairwise)
        for(j=1; j<ext; j++) {
          bound_x_3d(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            2.0/(*(rho+i+(BD_nx_vx-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vx-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            dx, dt, pvxbtpp, bhalf, ahalf, ext, fdc);
        }
        // interior body update
        for(j=ext; j<BD_nx_vx-ext; j++) {
          body_x_3d(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            dx, dt, fdc);
        }
      }
    }
    //********************* V_Y *********************//
    // vybtyy: vy update driven by d(tpp)/dy.
    #pragma omp for collapse(2) private(k) nowait
    for(j=0; j<BD_nx_vy; j++) {
      for(i=0; i<BD_nz_vy; i++) {
        for(k=1; k<ext; k++) {
          bound_y_3d(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)),
            2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vy-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)),
            dy,dt, pvybtpp, bhalf, ahalf, ext, fdc);
        }
        for(k=ext; k<BD_ny_vy-ext; k++) {
          body_y_3d(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)),
            dy,dt, fdc);
        }
      }
    }
    //********************* V_Z *********************//
    // vzbtzz: vz update driven by d(tpp)/dz; "unlimited" boundary at z —
    // presumably a free-surface/one-sided variant, see 3d_lib.c.
    #pragma omp for collapse(2) private(i) nowait
    for(j=0; j<BD_nx_vz; j++) {
      for(k=0; k<BD_ny_vz; k++) {
        for(i=1;i<ext;i++) {
          unlimited_bound_z_3d(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            2.0/(*(rho+(BD_nz_vz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            dz,dt, pvzbtpp, bhalf, ahalf, ext, fdc);
        }
        for(i=ext; i<BD_nz_vz-ext; i++) {
          body_z_3d(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tpp, BD_nz_tpp, BD_nx_tpp, 1,
            2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
            dz,dt, fdc);
        }
      }
    }
    // All velocity components must be fully updated before the pressure
    // update below reads them (the velocity loops above were nowait).
    #pragma omp barrier
    //************ T_X ************//
    // pressure contribution from d(vx)/dx (lambda = bulk modulus field)
    #pragma omp for collapse(2) private(j)
    for(k=0; k<BD_ny_tpp; k++) {
      for(i=0; i<BD_nz_tpp; i++) {
        for(j=2; j<ext; j++) {
          ac_bound_tpp_x_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vx, BD_nz_vx, BD_nx_vx, 2,
            lambda, dx, dt, ptppbvx, bfull, afull, ext, fdc);
        }
        for(j=ext; j<BD_nx_tpp-ext; j++) {
          ac_body_tpp_x_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vx, BD_nz_vx, BD_nx_vx, 2,
            lambda, dx, dt, fdc);
        }
      }
    }
    //************ T_Y ************//
    // pressure contribution from d(vy)/dy
    #pragma omp for collapse(2) private(k)
    for(i=0; i<BD_nz_tpp; i++) {
      for(j=0; j<BD_nx_tpp; j++) {
        for(k=2; k<ext; k++) {
          ac_bound_tpp_y_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vy, BD_nz_vy, BD_nx_vy, 2,
            lambda, dy, dt, ptppbvy, bfull, afull, ext, fdc);
        }
        for(k=ext; k<BD_ny_tpp-ext; k++) {
          ac_body_tpp_y_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vy, BD_nz_vy, BD_nx_vy, 2,
            lambda, dy, dt, fdc);
        }
      }
    }
    //************ T_Z ************//
    // pressure contribution from d(vz)/dz
    #pragma omp for collapse(2) private(i)
    for(j=0; j<BD_nx_tpp; j++) {
      for(k=0; k<BD_ny_tpp; k++) {
        for(i=2; i<ext; i++) {
          ac_unlimited_bound_tpp_z_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vz, BD_nz_vz, BD_nx_vz, 2,
            lambda, dz, dt, ptppbvz, bfull, afull, ext, fdc);
        }
        for(i=ext; i<BD_nz_tpp-ext; i++) {
          ac_body_tpp_z_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vz, BD_nz_vz, BD_nx_vz, 2,
            lambda, dz, dt, fdc);
        }
      }
    }
  } // end omp parallel
}
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/

/*
  File-scope cache component state; cache_semaphore serializes component
  genesis/terminus.
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
  AcquirePixelCache() acquires a pixel cache.

  The format of the AcquirePixelCache() method is:

      Cache AcquirePixelCache(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /* AcquireCriticalMemory aborts on failure, so cache_info is never NULL */
  cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info));
  (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    One nexus per worker thread: take the max of the caller's request, the
    OpenMP thread count, and the thread resource limit (at least 1).
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* environment variable first, then policy may override */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
  AcquirePixelCacheNexus() allocates the NexusInfo structure.

  The format of the AcquirePixelCacheNexus method is:

      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Two allocations: an aligned array of pointers, plus one contiguous slab
    of NexusInfo structs that the pointers index into.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    nexus_info[i]=(&nexus_info[0][i]);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
  AcquirePixelCachePixels() returns the pixels associated with the specified
  image.

  The format of the AcquirePixelCachePixels() method is:

      const void *AcquirePixelCachePixels(const Image *image,
        MagickSizeType *length,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o length: the pixel cache length.

    o exception: return any errors or warnings in this structure.
*/
MagickPrivate const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /* only in-core caches expose their pixel buffer directly */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((const void *) NULL);
  *length=cache_info->length;
  return((const void *) cache_info->pixels);
}

/*
  CacheComponentGenesis() instantiates the cache component.

  The format of the CacheComponentGenesis method is:

      MagickBooleanType CacheComponentGenesis(void)
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
  CacheComponentTerminus() destroys the cache component.

  The format of the CacheComponentTerminus() method is:

      CacheComponentTerminus(void)
*/
MagickPrivate void CacheComponentTerminus(void)
{
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
  ClonePixelCache() clones a pixel cache.  Note: only the thread count and
  virtual pixel method are copied here; pixel data is cloned elsewhere
  (see ClonePixelCacheRepository).

  The format of the ClonePixelCache() method is:

      Cache ClonePixelCache(const Cache cache)

  A description of each parameter follows:

    o cache: the pixel cache.
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
  another.

  The format of the ClonePixelCacheMethods() method is:

      void ClonePixelCacheMethods(Cache clone,const Cache cache)

  A description of each parameter follows:

    o clone: Specifies a pointer to a Cache structure.

    o cache: the pixel cache.
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}

/*
  ClonePixelCacheRepository() clones the source pixel cache to the destination
  cache.

  The format of the ClonePixelCacheRepository() method is:

      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
        CacheInfo *source_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o cache_info: the pixel cache.

    o source_info: the source pixel cache.

    o exception: return any errors or warnings in this structure.
*/

/* Byte-for-byte copy of a disk-backed cache; both files must open. */
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* copy in chunks no larger than the file size (when known) */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* a short copy means the clone is incomplete */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  Pick an OpenMP thread count for the row loop: serial when multithreaded is
  0, capped at 2 when either side is disk-backed (I/O bound), otherwise
  scaled by the work chunk.
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && \
    ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && \
    ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* both in-core: straight memcpy of pixels and metacontent */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
    Copy row-by-row through per-thread nexuses, remapping channels when the
    channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  if ((cache_nexus == (NexusInfo **) NULL) ||
      (clone_nexus == (NexusInfo **) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: identical channel maps allow a per-row memcpy */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    RectangleInfo
      region;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* read one source row into this thread's nexus */
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
      cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* stage the destination row */
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
      clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
      clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          /* gather each destination channel from the source map */
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
          cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
  DestroyImagePixelCache() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixelCache() method is:

      void DestroyImagePixelCache(Image *image)

  A description of each parameter follows:

    o image: the image.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyImagePixels() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixels() method is:

      void DestroyImagePixels(Image *image)

  A description of each parameter follows:

    o image: the image.
% */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? 
MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if 
(cache_info->reference_count != 0) { UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. 
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticMetacontentFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the installed handler when one is registered; otherwise serve
    the metacontent from this thread's nexus.
  */
  handler=cache_info->methods.get_authentic_metacontent_from_handler;
  if (handler != (GetAuthenticMetacontentFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c   M e t a c o n t e n t   F r o m   C a c h e     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or
%  GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticMetacontentFromCache() method is:
%
%      void *GetAuthenticMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* An undefined cache has not been materialized yet; open it now. */
  if (cache_info->type == UndefinedCache)
    SyncImagePixelCache((Image *) image,exception);
  /* Only an unmapped in-memory pixel cache can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /*
    If an OpenCL cache exists but belongs to a different device context,
    replace it with a copy usable from the requested device.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      /* lazily wrap the host pixel buffer in an OpenCL cache object */
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* retain the mem object for the caller while still holding the lock */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  /* the OpenCL buffer must alias the host pixels it was created from */
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c   P i x e l   C a c h e   N e x u s               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Stage the region in the nexus, then fill it from the backing store.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* direct view into the cache: nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) ==
        MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c   P i x e l s   F r o m   C a c h e               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /* each OpenMP thread owns one nexus slot; return its pixel pointer */
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c   P i x e l   Q u e u e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the installed handler; fall back to this thread's nexus pixels.
  */
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c   P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access. If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory. Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file. The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum. If the image has corresponding metacontent,call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the installed handler when present; otherwise serve the
    region from this thread's cache nexus.
  */
  handler=cache_info->methods.get_authentic_pixels_handler;
  if (handler != (GetAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c   P i x e l s   C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* defensive for release builds where the assert above is compiled out */
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* extent of this thread's nexus region (or whole image if none selected) */
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ValidatePixelCacheMorphology() returns MagickTrue only when the image's
  storage class, colorspace, geometry, channel map, and metacontent extent all
  agree with the cache, and the cache nexus array exists.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->read_mask != cache_info->read_mask) ||
      (image->write_mask != cache_info->write_mask) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  GetImagePixelCache() implements copy-on-write for the pixel cache: if the
  cache is shared (reference_count > 1) or read-only, a private clone is
  created and installed on the image before write access is granted.
*/
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* throttle the CPU every 32nd call when a throttle limit is configured */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* double-checked under the cache semaphore to avoid cloning twice */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy pixel data only when the caller asked for it */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference on the old cache after releasing its semaphore */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c   P i x e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y, pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l   P i x e l   F r o m   C a c h e               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l   P i x e l   I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* initialize pixel to the image defaults before attempting the fetch */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) pixel accessors */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* authentic (read/write) pixel accessors */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* queue/sync/destroy lifecycle */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  /* no region selected: fall back to the extent of the whole cache */
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    packet_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    One pixel packet occupies number_channels Quantums; size square tiles to
    span roughly 2KB of pixels for in-memory caches and 8KB for disk caches.
  */
  packet_size=cache_info->number_channels*sizeof(Quantum);
  *width=(GetImagePixelCacheType(image) == DiskCache ? 8192UL : 2048UL)/
    packet_size;
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    An undefined storage class means the cache holds no pixels yet.
  */
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Ask the installed handler first; fall back to this thread's cache nexus
    when the handler has no meta-content to offer.
  */
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent == (void *) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      metacontent=GetVirtualMetacontentFromNexus(cache_info,
        cache_info->nexus_info[id]);
    }
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m N e x u s                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      Quantum *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; /* Compute the remainder of dividing offset by extent. It returns not only the quotient (tile the offset falls in) but also the positive remainer within that tile such that 0 <= remainder < extent. This method is essentially a ldiv() using a floored modulo division rather than the normal default truncated modulo division. 
*/ modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; RectangleInfo region; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info, exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel 
request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ s=(unsigned char *) nexus_info->metacontent; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_metacontent,0, cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) 
y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. */ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; 
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) length*cache_info->number_channels* sizeof(*p)); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p)); q+=length*cache_info->number_channels; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. 
*/ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Serve the request through this thread's private cache nexus.
  */
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the installed handler; otherwise answer from this thread's cache
    nexus.
  */
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region. If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.
In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. % % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. 
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or reuse) the backing file for a disk-based pixel cache in the
  requested mode.  Returns MagickTrue when cache_info->file is a usable
  descriptor.  On success any previously open descriptor is closed and
  replaced.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    /* No filename yet: create a unique temporary cache file. */
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing
           file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,
            S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write length bytes of buffer to the cache file at the given byte offset,
  retrying on EINTR and on short writes.  Returns the number of bytes
  written, which is less than length on error.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: report bytes written so far */
      }
  }
  return(i);
}

/*
  Ensure the on-disk cache file is at least length bytes long, extending it
  by writing a single byte at length-1 (and optionally preallocating with
  posix_fallocate).  Leaves the file offset rewound to 0.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not survive a round-trip through the signed
     offset type. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already long enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void)
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) GetImageIndexInList(image)); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->read_mask=image->read_mask; cache_info->write_mask=image->write_mask; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if 
(image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. 
*/ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ number_pixels*cache_info->number_channels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. 
*/ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if (status == MagickFalse) cache_info->type=DiskCache; else if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache)) { cache_info->type=DiskCache; RelinquishMagickResource(MapResource,cache_info->length); } else { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->type=DiskCache; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ number_pixels*cache_info->number_channels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status == 0 ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=DiskCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(SyncImagePixelCache(image,exception)); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->read_mask=cache_info->read_mask; clone_info->write_mask=cache_info->write_mask; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /*
    Both the first and the last pixel of the request must lie inside the
    cache extent; otherwise refuse the region.
  */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Stage the requested region in the nexus and hand back its pixel buffer.
  */
  region.height=rows;
  region.width=columns;
  region.y=y;
  region.x=x;
  return(SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Thin wrapper: delegate to the per-thread cache nexus without cloning.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e A u t h e n t i c P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  QueueAuthenticPixelsHandler
    queue_handler;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer an installed queue handler; otherwise fall back to the
    per-thread cache nexus.
  */
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCacheRegion() reads length bytes at offset from the pixel-cache
  file into buffer, retrying after EINTR and short reads.  Returns the
  number of bytes actually read; less than length indicates a hard error
  or end-of-file.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error or EOF: return bytes read so far */
      }
  }
  return(i);
}

/*
  ReadPixelCacheMetacontent() copies the metacontent of the nexus region
  from the cache's backing store (memory, disk, or distributed server) into
  nexus_info->metacontent.  Returns MagickTrue on success; MagickFalse if
  the cache has no metacontent or a transfer fails.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* nexus already aliases the cache in place: nothing to transfer */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region is contiguous: copy it in one pass */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* metacontent follows all pixel data in the file layout */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a transfer above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCachePixels() copies the pixels of the nexus region from the
  cache's backing store (memory, disk, or distributed server) into
  nexus_info->pixels.  All size computations are guarded against integer
  overflow before any transfer takes place.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* nexus already aliases the cache in place: nothing to transfer */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* overflow check: undo the multiply and compare */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region is contiguous: copy it in one pass */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a transfer above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* reference count is guarded by the cache's own semaphore */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e C h a n n e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* resynchronize the cache's channel count with the image's */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e E p o c h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler in cache_methods overrides the
    corresponding entry in cache_info->methods only when it is non-NULL.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike the handlers above, the null check for the
    one-virtual-pixel handler inspects cache_info's CURRENT handler while
    the one-authentic-pixel check inspects cache_methods' handler.  The
    asymmetry looks intentional (install the virtual override only when a
    handler is already present) — confirm before "fixing".
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Allocate (or anonymously map) nexus_info->length bytes of staging pixels.
  Returns MagickFalse if the length overflows size_t or allocation fails
  (recording a ResourceLimitError in exception).
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /* Reject lengths that do not round-trip through size_t on this platform. */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      /* Heap path: aligned allocation, zero-filled on success. */
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) ResetMagickMemory(nexus_info->cache,0,(size_t)
          nexus_info->length);
    }
  else
    {
      /* Anonymous-mapping path: MapBlob with fd -1. */
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  Return MagickTrue when the nexus aliases the in-core cache pixels directly
  (i.e. writes need no separate sync), MagickFalse when it is buffered.
*/
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset*
    cache_info->number_channels) ? MagickTrue : MagickFalse;
  return(status);
}

/*
  Hint the CPU prefetcher at the nexus pixels; the second argument of
  MagickCachePrefetch distinguishes read (0) from write (1) intent.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((region->width == 0) || (region->height == 0))
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  if (number_pixels == 0)
    return((Quantum *) NULL);
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      ssize_t
        x,
        y;

      /* x,y: inclusive lower-right corner of the requested region. */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        Direct access is possible only when the region lies entirely inside
        the cache AND is either a single row or a run of whole rows starting
        at column 0 (so its pixels are contiguous in memory).
      */
      if (((nexus_info->region.x >= 0) &&
           (x < (ssize_t) cache_info->columns) &&
           (nexus_info->region.y >= 0) &&
           (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) ||
           ((nexus_info->region.x == 0) &&
            ((nexus_info->region.width == cache_info->columns) ||
             ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
    The staging buffer holds the pixels followed by the metacontent.
  */
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small: release and reacquire. */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.
A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict 
cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. % % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/

/*
  Resolve any pending OpenCL work on the cache's device buffer back into host
  memory.  No-op unless the cache is an in-memory cache with an attached
  OpenCL cache info.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e N e x u s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Nexus aliasing the cache directly needs no copy, only a taint mark. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l s C a c h e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Each OpenMP thread owns a dedicated nexus; sync this thread's nexus. */
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c A u t h e n t i c P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk
%  cache.  The method returns MagickTrue if the pixel region is flushed,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a user-installed sync handler when one is registered. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk
%  cache.  The method returns MagickTrue if the pixel region is flushed,
%  otherwise MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e M e t a c o n t e n t                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified
%  region of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus aliases the cache directly: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset is in pixels; length is one region row of metacontent in bytes. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      /* Full-width regions collapse to a single contiguous copy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent follows all pixel data, so the file offset is
        biased by the full pixel extent (columns*rows pixels).  extent is
        repurposed here as that pixel count.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows: report the failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the cache directly: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset is in pixels; length is one region row of pixel data in bytes. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*
    nexus_info->region.width*sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      /* Full-width regions collapse to a single contiguous copy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows: report the failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
ams.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" #include "float.h" #include "ams.h" /*-------------------------------------------------------------------------- * hypre_ParCSRRelax * * Relaxation on the ParCSR matrix A with right-hand side f and * initial guess u. Possible values for relax_type are: * * 1 = l1-scaled (or weighted) Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR * 3 = Kaczmarz * 4 = truncated version of 2 (Remark 6.2 in smoothers paper) * x = BoomerAMG relaxation with relax_type = |x| * (16 = Cheby) * * The default value of relax_type is 2. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2)) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   /* Raw local data pointers; all three vectors are assumed to have the
      same local size as the number of local rows of A. */
   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   //printRC(hypre_ParVectorLocalVector(u),"STarting....");
   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         PUSH_RANGE_PAYLOAD("RELAX",4,sweep);
         HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

#ifdef HYPRE_USING_UNIFIED_MEMORY
         /* Prefetch only once; later sweeps reuse device-resident data. */
         if (sweep==0)
         {
            hypre_SeqVectorPrefetchToDevice(hypre_ParVectorLocalVector(v));
            hypre_SeqVectorPrefetchToDevice(hypre_ParVectorLocalVector(f));
         }
#endif
         //SyncVectorToHost(hypre_ParVectorLocalVector(v));
         //SyncVectorToHost(hypre_ParVectorLocalVector(f));

         /* v = f (device copy when GPU+UM is available, host copy otherwise) */
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
         VecCopy(v_data,f_data,hypre_VectorSize(hypre_ParVectorLocalVector(v)),HYPRE_STREAM(4));
#else
         //printRC(hypre_ParVectorLocalVector(v),"Pre-COPY V");
         //printRC(hypre_ParVectorLocalVector(f),"Pre-COPY F");
         hypre_ParVectorCopy(f,v);
#endif
#ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD
         SyncVectorToDevice(hypre_ParVectorLocalVector(v));
#endif

         /* v = w * (f - A u) */
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);
         //SyncVectorToHost(hypre_ParVectorLocalVector(v));
         //SyncVectorToHost(hypre_ParVectorLocalVector(u));

         PUSH_RANGE_PAYLOAD("VECSCALE-RELAX",5,num_rows);
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
         VecScale(u_data,v_data,l1_norms,num_rows,HYPRE_STREAM(4));
#else
         HYPRE_Int i;
         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
#if defined(HYPRE_USING_OPENMP_OFFLOAD)
         /* NOTE(review): (num_rows+num_rows%1024)/1024 is not a ceiling
            division, so fewer teams than num_rows/1024 may be requested;
            'distribute parallel for' still covers all iterations, so this
            only affects occupancy — confirm intent. */
         HYPRE_Int num_teams = (num_rows+num_rows%1024)/1024;
         //printf("AMS.C %d = %d \n",num_rows,num_teams*1024);
         //printf("Ptypes %d %d %d \n",PointerAttributes(u_data),PointerAttributes(v_data),PointerAttributes(l1_norms));
#pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(1024) is_device_ptr(u_data,v_data,l1_norms)
#elif defined(HYPRE_USING_MAPPED_OPENMP_OFFLOAD)
         HYPRE_Int num_teams = (num_rows+num_rows%1024)/1024;
#pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(1024)
#endif
         for (i = 0; i < num_rows; i++)
         {
            /* assumes l1_norms[i] != 0 (checked in hypre_ParCSRComputeL1Norms) */
            u_data[i] += v_data[i] / l1_norms[i];
         }
#endif
#ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD
         UpdateDRC(hypre_ParVectorLocalVector(u));
#endif
         //printf("AMS.C DONE %d = %d \n",num_rows,num_teams*1024);
         POP_RANGE;
         POP_RANGE;
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* lower-triangular part contributes to the correction term */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }
         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         /* Forward local pass: project u onto each row's hyperplane in turn;
            the update touches every unknown in the row (diag part only). */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est,
                                    cheby_fraction, cheby_order, 1, 0, u, v, z);
         }
         else
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Return a vector that belongs to the range of a given matrix.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* row partitioning of A; the vector does not own the partitioning array */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Return a vector that belongs to the domain of a given matrix.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* column partitioning of A; the vector does not own the partitioning array */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x, hypre_ParVector *x_[3], HYPRE_Int dim) { HYPRE_Int i, d, size_; HYPRE_Real *x_data, *x_data_[3]; size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0])); x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); for (d = 0; d < dim; d++) x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d])); for (i = 0; i < size_; i++) for (d = 0; d < dim; d++) x_data_[d][i] = x_data[dim*i+d]; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorBlockGather * * Compose a parallel block vector x from dim given sub-vectors * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]]. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x, hypre_ParVector *x_[3], HYPRE_Int dim) { HYPRE_Int i, d, size_; HYPRE_Real *x_data, *x_data_[3]; size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0])); x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); for (d = 0; d < dim; d++) x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d])); for (i = 0; i < size_; i++) for (d = 0; d < dim; d++) x_data[dim*i+d] = x_data_[d][i]; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGBlockSolve * * Apply the block-diagonal solver diag(B) to the system diag(A) x = b. * Here B is a given BoomerAMG solver for A, while x and b are "block" * parallel vectors. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBlockSolve(void *B, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { HYPRE_Int d, dim = 1; hypre_ParVector *b_[3]; hypre_ParVector *x_[3]; dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A); if (dim == 1) { hypre_BoomerAMGSolve(B, A, b, x); return hypre_error_flag; } for (d = 0; d < dim; d++) { b_[d] = hypre_ParVectorInRangeOf(A); x_[d] = hypre_ParVectorInRangeOf(A); } hypre_ParVectorBlockSplit(b, b_, dim); hypre_ParVectorBlockSplit(x, x_, dim); for (d = 0; d < dim; d++) hypre_BoomerAMGSolve(B, A, b_[d], x_[d]); hypre_ParVectorBlockGather(x, x_, dim); for (d = 0; d < dim; d++) { hypre_ParVectorDestroy(b_[d]); hypre_ParVectorDestroy(x_[d]); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFixZeroRows * * For every zero row in the matrix: set the diagonal element to 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A) { HYPRE_Int i, j; HYPRE_Real l1_norm; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); /* a row will be considered zero if its l1 norm is less than eps */ HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */ for (i = 0; i < num_rows; i++) { l1_norm = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm += fabs(A_diag_data[j]); if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm += fabs(A_offd_data[j]); if (l1_norm <= 
eps) { for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (A_diag_J[j] == i) A_diag_data[j] = 1.0; else A_diag_data[j] = 0.0; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) A_offd_data[j] = 0.0; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRComputeL1Norms * * Compute the l1 norms of the rows of a given matrix, depending on * the option parameter: * * option 1 = Compute the l1 norm of the rows * option 2 = Compute the l1 norm of the (processor) off-diagonal * part of the rows plus the diagonal of A * option 3 = Compute the l2 norm^2 of the rows * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid * Smoothers for Ultra-Parallel Computing" * * The above computations are done in a CF manner, whenever the provided * cf_marker is not NULL. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real diag; HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED); HYPRE_Int *cf_marker_offd = NULL; HYPRE_Int cf_diag; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle 
*comm_handle; if (num_cols_offd) cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } if (option == 1) { for (i = 0; i < num_rows; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += fabs(A_diag_data[j]); /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (cf_diag == cf_marker[A_diag_J[j]]) l1_norm[i] += fabs(A_diag_data[j]); /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 2) { for (i = 0; i < num_rows; i++) { /* Add the diag element of the ith row */ l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]); if (cf_marker == NULL) { /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the offd part of the ith row 
*/ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 3) { for (i = 0; i < num_rows; i++) { l1_norm[i] = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += A_diag_data[j] * A_diag_data[j]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += A_offd_data[j] * A_offd_data[j]; } } else if (option == 4) { for (i = 0; i < num_rows; i++) { /* Add the diag element of the ith row */ diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]); if (cf_marker == NULL) { /* Add the scaled l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the scaled CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } /* Truncate according to Remark 6.2 */ if (l1_norm[i] <= 4.0/3.0*diag) l1_norm[i] = diag; } } else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */ { for (i = 0; i < num_rows; i++) { diag = A_diag_data[A_diag_I[i]]; if (diag != 0.0) l1_norm[i] = diag; else l1_norm[i] = 1.0; } *l1_norm_ptr = l1_norm; return hypre_error_flag; } /* Handle negative definite matrices */ for (i = 0; i < num_rows; i++) if (A_diag_data[A_diag_I[i]] < 0) l1_norm[i] = -l1_norm[i]; for (i = 0; i < num_rows; i++) /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } //for (i = 0; i < num_rows; i++) l1_norm[i]=1.0/l1_norm[i]; hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST); *l1_norm_ptr = l1_norm; #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD #pragma omp target enter data map(to:l1_norm[0:num_rows]) if (num_rows>0) #endif return hypre_error_flag; } 
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int row, first;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (row = 0; row < num_rows; row++)
   {
      first = A_diag_I[row];

      /* the diag part must hold exactly one entry, and it must be the
         diagonal itself */
      if (A_diag_I[row+1] != first+1 || A_diag_J[first] != row)
         continue;

      /* the offd part of the row must be empty (or absent entirely) */
      if (num_cols_offd && A_offd_I[row+1] != A_offd_I[row])
         continue;

      A_diag_data[first] = d;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   /* zero-initialized, so any field not set below starts as 0/NULL */
   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;              /* 3D problem */
   ams_data -> maxit = 20;           /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;           /* convergence tolerance */
   ams_data -> print_level = 1;      /* print residual norm at each step */
   ams_data -> cycle_type = 1;       /* a 3-level multiplicative solver */

   ams_data -> A_relax_type = 2;     /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;    /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0; /* damping parameter */
   ams_data -> A_omega = 1.0;        /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;    /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */

   ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;      /* strength threshold */
   ams_data -> B_G_interp_type = 0;   /* interpolation type */
   ams_data -> B_G_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;      /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;

   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;

   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;

   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags: decide what hypre_AMSDestroy is allowed to free */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* auxiliary Poisson matrices/solvers: freed only when owned internally */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* temporary work vectors */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* NOTE(review): destroying ams_data->A under "if (G0)" looks like it
      frees the user's matrix, but when G0 is given the setup phase
      presumably replaced A with an internally-built (penalized) matrix —
      confirm against hypre_AMSSetup before changing. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   if (ams_data -> A_l1_norms)
      hypre_TFree(ams_data -> A_l1_norms, HYPRE_MEMORY_SHARED);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   if (ams_data)
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* flag the bad argument but still store it (original behavior) */
   if (dim != 2 && dim != 3)
      hypre_error_in_arg(2);

   ams_data -> dim = dim;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> G = G;   /* not owned; never destroyed by AMS */
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* not owned; never destroyed by AMS */
   ams_data -> x = x;
   ams_data -> y = y;
   ams_data -> z = z;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* not owned; never destroyed by AMS */
   ams_data -> Gx = Gx;
   ams_data -> Gy = Gy;
   ams_data -> Gz = Gz;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components of
 * the first node (vertex or high-order dof) should be listed first, followed by
 * the x/y/z components of the second node and so on (see the documentation of
 * HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> Pi = Pi;
   ams_data -> Pix = Pix;
   ams_data -> Piy = Piy;
   ams_data -> Piz = Piz;
   /* user-supplied interpolations stay owned by the caller */
   ams_data -> owns_Pi = 0;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom.
      NOTE: this modifies the caller's matrix in place. */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup().
If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_G = A_G;
   if (!A_G)
      /* NULL means beta == 0: skip the third (gradient) correction space */
      ams_data -> beta_is_zero = 1;
   else
   {
      /* Penalize the eliminated degrees of freedom.
         NOTE: this modifies the caller's matrix in place. */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Set the list of nodes which are interior to the zero-conductivity region.
 * A node is interior if interior_nodes[i] == 1.0.
 *
 * Should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> interior_nodes = interior_nodes;   /* not owned */
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetProjectionFrequency
 *
 * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
 * when iterating with the solver.
 *
 * The default value is every 5th iteration.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> projection_frequency = projection_frequency;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetMaxIter
 *
 * Set the maximum number of iterations in the three-level method.
 * The default value is 20.
To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver, HYPRE_Int A_relax_type, HYPRE_Int A_relax_times, HYPRE_Real A_relax_weight, HYPRE_Real A_omega) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_relax_type = A_relax_type; ams_data -> A_relax_times = A_relax_times; ams_data -> A_relax_weight = A_relax_weight; ams_data -> A_omega = A_omega; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetChebySmoothingOptions * AB: note: this could be added to the above, * but I didn't want to change parameter list) * Set parameters for chebyshev smoother for A. Default values: 2,.3. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver, HYPRE_Int A_cheby_order, HYPRE_Int A_cheby_fraction) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_cheby_order = A_cheby_order; ams_data -> A_cheby_fraction = A_cheby_fraction; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGOptions * * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver, HYPRE_Int B_Pi_coarsen_type, HYPRE_Int B_Pi_agg_levels, HYPRE_Int B_Pi_relax_type, HYPRE_Real B_Pi_theta, HYPRE_Int B_Pi_interp_type, HYPRE_Int B_Pi_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type; ams_data -> B_Pi_agg_levels = B_Pi_agg_levels; ams_data -> B_Pi_relax_type = B_Pi_relax_type; ams_data -> B_Pi_theta = B_Pi_theta; ams_data -> B_Pi_interp_type = B_Pi_interp_type; ams_data -> B_Pi_Pmax = B_Pi_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_Pi. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver, HYPRE_Int B_Pi_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *)solver; ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGOptions * * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver, HYPRE_Int B_G_coarsen_type, HYPRE_Int B_G_agg_levels, HYPRE_Int B_G_relax_type, HYPRE_Real B_G_theta, HYPRE_Int B_G_interp_type, HYPRE_Int B_G_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarsen_type = B_G_coarsen_type; ams_data -> B_G_agg_levels = B_G_agg_levels; ams_data -> B_G_relax_type = B_G_relax_type; ams_data -> B_G_theta = B_G_theta; ams_data -> B_G_interp_type = B_G_interp_type; ams_data -> B_G_Pmax = B_G_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_G. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver, HYPRE_Int B_G_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSComputePi * * Construct the Pi interpolation matrix, which maps the space of vector * linear finite elements to the space of edge finite elements. * * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z], * where each block has the same sparsity structure as G, and the entries * can be computed from the vectors Gx, Gy, Gz. 
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z].  Pi has the sparsity pattern of G
      with each column of G replicated dim times. */
   {
      HYPRE_Int i, j, d;

      /* Local data of the coordinate gradient vectors.
         NOTE: Gz_data is assigned and dereferenced only when dim == 3. */
      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* Column partitioning of Pi = dim times the column partitioning of G */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      /* Pi shares G's row_starts but owns the col_starts allocated above */
      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;

      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* Each row of Pi has dim times as many entries as the same row of G */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         /* Column j of G expands to columns dim*j .. dim*j+dim-1 of Pi */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Entry values: 0.5*|G_ij| times the x/y/z gradient value of row i */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* The row pointer is filled only when off-processor columns exist */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Off-processor column map: dim consecutive global columns of Pi
            for every global column of G */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

   /* Compute Pix, Piy, Piz.  Each component has exactly G's dimensions and
      sparsity, so G's partitionings are reused (and not owned).
      NOTE: Piz/Gz_data/*Piz_ptr are touched only when dim == 3. */
   {
      HYPRE_Int i, j;
      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      Piy = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Piy) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
      hypre_ParCSRMatrixInitialize(Piy);

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

         /* Row pointers and column indices are copied verbatim from G */
         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
            Piz_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
            Piz_diag_J[i] = G_diag_J[i];
         }

         /* Entry values: 0.5*|G_ij| times the respective gradient of row i */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }
      else
      {
         /* 2D: same construction, x and y components only */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

         /* The row pointer is filled only when off-processor columns exist */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
               Piz_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
            Piz_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* All components share G's off-processor column map */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         /* 2D: same construction, x and y components only */
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   *Piy_ptr = Piy;
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G: from here on, dim counts the G block as well
      (so dim is 3 in 2D and 4 in 3D) */
   dim++;

   /* Compute GPi = [G, Pi_x, Pi_y, Pi_z]: same expansion of G's sparsity
      as in hypre_AMSComputePi, with the G entry stored first in each
      group of dim columns */
   {
      HYPRE_Int i, j, d;

      /* NOTE: Gz_data is assigned and dereferenced only when dim == 4 */
      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* Column partitioning of GPi = dim times the column partitioning of G */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      /* GPi shares G's row_starts but owns the col_starts allocated above */
      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;

      hypre_ParCSRMatrixInitialize(GPi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 4)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

         /* Each row of GPi has dim times as many entries as the row of G */
         for (i = 0; i < G_diag_nrows+1; i++)
            GPi_diag_I[i] = dim * G_diag_I[i];

         /* Column j of G expands to columns dim*j .. dim*j+dim-1 of GPi */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Entry values: the G entry itself, then the Pi entries
            0.5*|G_ij| * (Gx, Gy[, Gz]) of row i */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *GPi_diag_data++ = G_diag_data[j];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

         /* The row pointer is filled only when off-processor columns exist */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               GPi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *GPi_offd_data++ = G_offd_data[j];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) GPi_cmap[dim*i+d] = dim*G_cmap[i]+d; } } *GPi_ptr = GPi; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetup * * Construct the AMS solver components. * * The following functions need to be called before hypre_AMSSetup(): * - hypre_AMSSetDimension() (if solving a 2D problem) * - hypre_AMSSetDiscreteGradient() * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int input_info = 0; ams_data -> A = A; /* Modifications for problems with zero-conductivity regions */ if (ams_data -> interior_nodes) { hypre_ParCSRMatrix *G0t, *Aorig = A; /* Make sure that multiple Setup()+Solve() give identical results */ ams_data -> solve_counter = 0; /* Construct the discrete gradient matrix for the zero-conductivity region by eliminating the zero-conductivity nodes from G^t. The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. 
*/ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data=hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i+1]; j++) G0tdA[j] = 0.0; if (G0toI) for (j = G0toI[i]; j < G0toI[i+1]; j++) G0toA[j] = 0.0; } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, 
(HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t); hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); 
hypre_CSRMatrixBigJtoJ(C_tmp); C_local = hypre_CSRMatrixDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 1; hypre_ParCSRMatrixOwnsColStarts(G0t) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */ /* Compute the l1 norm of the rows of A */ if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4) hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &ams_data -> A_l1_norms); /* Chebyshev? 
*/ if (ams_data -> A_relax_type == 16) { hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10, &ams_data->A_max_eig_est, &ams_data->A_min_eig_est); } /* If not given, compute Gx, Gy and Gz */ { if (ams_data -> x != NULL && ams_data -> y != NULL && (ams_data -> dim == 2 || ams_data -> z != NULL)) input_info = 1; if (ams_data -> Gx != NULL && ams_data -> Gy != NULL && (ams_data -> dim == 2 || ams_data -> Gz != NULL)) input_info = 2; if (input_info == 1) { ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx); ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy); if (ams_data -> dim == 3) { ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz); } } } if (ams_data -> Pi == NULL && ams_data -> Pix == NULL) { if (ams_data -> cycle_type == 20) /* Construct the combined interpolation matrix [G,Pi] */ hypre_AMSComputeGPi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); else if (ams_data -> cycle_type > 10) /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */ hypre_AMSComputePixyz(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, &ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). 
*/ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { HYPRE_Int G_owned_col_starts; if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) hypre_MatvecCommPkgCreate(ams_data -> G); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G); hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); /* Make sure that A_G has no zero rows (this can happen if beta is zero in part of the domain). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts; hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0; ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, 0, 0); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_Int P_owned_col_starts; HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data 
-> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) hypre_MatvecCommPkgCreate(ams_data -> Pix); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0; } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, 0, 0); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) hypre_MatvecCommPkgCreate(ams_data -> Piy); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0; } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, 0, 0); if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) hypre_MatvecCommPkgCreate(ams_data -> Piz); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0; } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, 0, 0); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. 
*/ if (!ams_data -> A_Pi) { HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) hypre_MatvecCommPkgCreate(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); hypre_ParCSRMatrixOwnsColStarts(Gt) = 0; hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0; /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); for (i = 0; i < Gt_num_rows; i++) { /* determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) Gt_diag_data[j] *= h2; for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++) Gt_offd_data[j] *= h2; } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } GGt = 
hypre_ParMatmul(ams_data -> G, Gt); hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ { hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixAdd(A_local, B_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; } hypre_ParCSRMatrixDestroy(GGt); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } else { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } if (!P_owned_col_starts) { 
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0; } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); else HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi); HYPRE_BoomerAMGSetup(ams_data -> B_Pi, (HYPRE_ParCSRMatrix)ams_data -> A_Pi, 0, 0); } /* Allocate temporary vectors */ ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A); ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A); if (ams_data -> A_G) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G); } if (ams_data -> r1 == NULL && ams_data -> A_Pix) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); } if (ams_data -> Pi) { ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi); ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSolve * * Solve the system A x = b. 
 *
 * With maxit == 1 this acts as a single application of the AMS
 * preconditioner; with maxit > 1 it iterates until the relative residual
 * drops below ams_data -> tol.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   /* NOTE: r0_norm, r_norm and b_norm are only assigned when maxit > 1;
      all later uses are guarded by the same condition. */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;

   char cycle[30];
   /* Subspace data, indexed by the digit used in the cycle string minus 1:
      slot 0 = gradient space (G), slot 1 = full nodal space (Pi),
      slots 2-4 = the scalar x/y/z nodal spaces (Pix/Piy/Piz). */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;

   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* The Pi space is solved with the block (systems) variant of BoomerAMG;
      all other spaces use the scalar solve. */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* Temporary vectors: r1/g1 are shared by all scalar subspaces,
      r2/g2 belong to the (larger) Pi space. */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      /* z borrows A's row partitioning; it must not free it */
      hypre_ParVectorSetPartitioningOwner(z,0);
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity
      regions. Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Encode the chosen cycle as a string interpreted by
      hypre_ParCSRSubspacePrec: digits select smoothing ('0') or a subspace
      correction ('1'..'5'); '(', ')' and '+' control additive corrections. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1: case 3: case 5: case 7: default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2: case 4: case 6: case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11: case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1: default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n",
                         r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle, z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      /* converged: count this cycle and stop (when maxit == 1,
         relative_resid is still 0, so exactly one cycle is applied) */
      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   /* flag non-convergence only when a positive tolerance was requested */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   if (z)
      hypre_ParVectorDestroy(z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRSubspacePrec
 *
 * General subspace preconditioner for A0 y = x, based on ParCSR storage.
 *
 * P[i] and A[i] are the interpolation and coarse grid matrices for
 * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
 * are temporary vectors. A0_* are the fine grid smoothing parameters.
 *
 * The default mode is multiplicative, '+' changes the next correction
 * to additive, based on residual computed at '('.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   /* The cycle string is interpreted one character at a time:
      '('  - save the current residual in r0 (for additive corrections)
      ')'  - no-op (closes a '(' group, purely for readability)
      '+'  - the NEXT correction uses the saved residual (additive mode)
      '0'  - fine-grid smoothing
      '1'-'9' - correction in subspace (digit - 1) */
   char *op;
   HYPRE_Int use_saved_residual = 0;

   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         if (i < 0)
            /* any character below '1' at this point is an invalid cycle
               string coming through argument 16 (cycle) */
            hypre_error_in_arg(16);

         /* skip empty subspaces */
         if (!A[i]) continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive: restrict the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative: restrict the current residual x - A0 y */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* one AMG cycle in the subspace, starting from a zero guess */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         /* prolongate the correction and update the approximation */
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
Without it * the values in the zero-conductivity regions contain kernel components. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSProjectOutGradients(void *solver, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> B_G0) { hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1); hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0); hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1); hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0); hypre_ParVectorAxpy(-1.0, ams_data -> g0, x); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSConstructDiscreteGradient * * Construct and return the lowest-order discrete gradient matrix G, based on: * - a matrix on the egdes (e.g. the stiffness matrix A) * - a vector on the vertices (e.g. the x coordinates) * - the array edge_vertex, which lists the global indexes of the * vertices of the local edges. * * We assume that edge_vertex lists the edge vertices consecutively, * and that the orientation of all edges is consistent. More specificaly: * If edge_orientation = 1, the edges are already oriented. * If edge_orientation = 2, the orientation of edge i depends only on the * sign of edge_vertex[2*i+1] - edge_vertex[2*i]. 
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_Int *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   /* local edge count = local number of rows of the edge matrix A */
   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
      HYPRE_Int part_size;
      HYPRE_BigInt *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);

      /* each edge row has exactly two entries (its two endpoint vertices) */
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i]   = -1.0;
            data[i+1] =  1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i]   = -1.0;
               data[i+1] =  1.0;
            }
            else
            {
               data[i]   =  1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
         /* NOTE(review): hypre_error_in_arg does not return from this
            function, so an invalid edge_orientation still builds a G with
            all-zero entries — confirm this is the intended behavior */
         hypre_error_in_arg(4);

      hypre_CSRMatrixI(local) = I;
      /* J borrows the caller's edge_vertex array; it is detached again
         (set to NULL) before the matrix is destroyed below */
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts,
                                   col_starts,
                                   0, 0, 0);
      /* G owns the partitioning copies allocated above */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix (J was borrowed, so detach it first) */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEISetup
 *
 * Construct an AMS solver object based on the following data:
 *
 * A - the edge element stiffness matrix
 * num_vert - number of vertices (nodes) in the processor
 * num_local_vert - number of vertices owned by the processor
 * vert_number - global indexes of the vertices in the processor
 * vert_coord - coordinates of the vertices in the processor
 * num_edges - number of edges owned by the processor
 * edge_vertex - the vertices of the edges owned by the processor.
 *               Vertices are in local numbering (the same as in
 *               vert_number), and edge orientation is always from
 *               the first to the second vertex.
 *
 * Here we distinguish between vertices that belong to elements in the
 * current processor, and the subset of these vertices that is owned by
 * the processor.
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_Int *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt *vert_part, num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;
   HYPRE_BigInt *big_edge_vertex;

   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed partition: only [my_first, my_first + num_local) is stored */
   vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
#else
   /* global partition: every process stores the full prefix-sum array */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT,
                       &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif

   /* Construct hypre parallel vectors for the vertex coordinates */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   /* vert_part is shared by all three vectors; none of them owns it */
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices (vert_coord is packed
      as x,y,z triples per vertex) */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global */
   big_edge_vertex = hypre_CTAlloc(HYPRE_BigInt, 2*num_edges, HYPRE_MEMORY_HOST);
   for (i = 0; i < 2*num_edges; i++)
      big_edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      /* each edge row has exactly two entries */
      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = big_edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* row starts are borrowed from A; column starts (vert_part) are
         handed over to G */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      /* detach the (freed-below) big_edge_vertex view before destroying */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }
   hypre_TFree(big_edge_vertex, HYPRE_MEMORY_HOST);

   ams_data -> G = G;
   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSDestroy().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (ams_data -> G)
      hypre_ParCSRMatrixDestroy(ams_data -> G);

   if (ams_data -> x)
      hypre_ParVectorDestroy(ams_data -> x);
   if (ams_data -> y)
      hypre_ParVectorDestroy(ams_data -> y);
   if (ams_data -> z)
      hypre_ParVectorDestroy(ams_data -> z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms Threads
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 * part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 * Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int num_threads, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real diag; HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED); HYPRE_Int ii, ns, ne, rest, size; HYPRE_Int *cf_marker_offd = NULL; HYPRE_Int cf_diag; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP 
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE #endif for (k = 0; k < num_threads; k++) { size = num_rows/num_threads; rest = num_rows - size*num_threads; if (k < rest) { ns = k*size+k; ne = (k+1)*size+k+1; } else { ns = k*size+rest; ne = (k+1)*size+rest; } if (option == 1) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += fabs(A_diag_data[j]); /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (cf_diag == cf_marker[A_diag_J[j]]) l1_norm[i] += fabs(A_diag_data[j]); /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 2) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == 
cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 3) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += A_diag_data[j] * A_diag_data[j]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += A_offd_data[j] * A_offd_data[j]; } } else if (option == 4) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } /* Truncate according to Remark 6.2 */ if (l1_norm[i] <= 4.0/3.0*diag) l1_norm[i] = diag; } } /* Handle negative definite matrices */ for (i = ns; i < ne; i++) if (A_diag_data[A_diag_I[i]] < 0) l1_norm[i] = -l1_norm[i]; for (i = ns; i < ne; i++) /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST); *l1_norm_ptr = l1_norm; #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD #pragma omp target enter data 
map(to:l1_norm[0:num_rows]) #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRRelaxThreads * 1 = l1-scaled Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int relax_type, HYPRE_Int relax_times, HYPRE_Real *l1_norms, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *z) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data; HYPRE_Real *v_buf_data; HYPRE_Real *tmp_data; HYPRE_Int i, j; HYPRE_Int ii, jj; HYPRE_Int ns, ne, size, rest; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, num_threads, my_id; HYPRE_Real zero = 0.0; HYPRE_Real res, res2; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /* only allow jacobi and GS */ if (relax_type > 2) relax_type 
= 2; /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } if (relax_type == 1) /* Jacobi */ { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (relax_weight*res)/l1_norms[i]; } } } else if (relax_type == 2) /* GS */ { if (relax_weight == 1 && omega == 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { HYPRE_Real c1 = omega*relax_weight; HYPRE_Real c2 = omega*(1.0-relax_weight); tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { tmp_data[i] = u_data[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res2 = 0.0; res = f_data[i]; Vtemp_data[i] = u_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; if (ii < i) res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]); } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (c1*res + c2*res2) / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; if (ii > i) res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]); } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (c1*res + c2*res2) / l1_norms[i]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } /* end of Jacobi or G.S. */ if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return(relax_error); }
/* ===== 3d25pt_var.c ===== */
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* ===== chunk.c ===== */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>

#define N 1000
#define CHUNK 10

/*
 * Demonstrates OpenMP loop scheduling.
 *
 * schedule(dynamic, CHUNK) distributes iterations on a first-come,
 * first-served basis: each thread grabs a chunk of CHUNK iterations and,
 * when it finishes, is assigned the next available chunk.  Increasing the
 * chunk size makes the schedule behave more like static scheduling;
 * decreasing it makes it more dynamic.
 */
int main()
{
    /* Fix: removed unused local `n_threads` (declared but never used). */
    int i;

    /* Each iteration sleeps i seconds, so the workload is deliberately
     * unbalanced -- exactly the case dynamic scheduling handles well. */
#pragma omp parallel for private(i) schedule(dynamic, CHUNK) num_threads(4)
    for (i = 0; i < 16; i++)
    {
        sleep(i);
        printf("The thread %d has completed the iteration %d\n",
               omp_get_thread_num(), i);
    }

    printf("All threads have ended!\n");
    return 0;
}
/* ===== gemv_x_bsr_trans.c ===== */
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#include <string.h>
#endif

/*
 * y := beta * y + alpha * A^T * x for a BSR matrix A.
 *
 * Rows of A are partitioned over threads by nonzero count.  Because the
 * transposed product scatters contributions across columns, every thread
 * accumulates into a private dense buffer of length cols*block_size; the
 * buffers are reduced into y afterwards.
 */
static alphasparse_status_t gemv_bsr_trans_omp(const ALPHA_Number alpha,
                                               const ALPHA_SPMAT_BSR* A,
                                               const ALPHA_Number* x,
                                               const ALPHA_Number beta,
                                               ALPHA_Number* y)
{
    ALPHA_INT bs      = A->block_size;
    ALPHA_INT bs2     = bs * bs;
    ALPHA_INT m_inner = A->rows;
    ALPHA_INT n_inner = A->cols;

    const ALPHA_INT thread_num = alpha_get_thread_num();
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, m_inner, thread_num, partition);

    /* one private accumulation buffer per thread */
    ALPHA_Number** scratch = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid     = alpha_get_thread_id();
        const ALPHA_INT row_beg = partition[tid];
        const ALPHA_INT row_end = partition[tid + 1];

        scratch[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number) * n_inner * bs);
        memset(scratch[tid], 0, sizeof(ALPHA_Number) * n_inner * bs);

        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT r = row_beg; r < row_end; r++)
            {
                for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
                {
                    /* A entry (bs*r + br, bs*col + bc) multiplies x[bs*r + br]
                       and lands at position bs*col + bc of the result. */
                    for (ALPHA_INT br = 0; br < bs; br++)
                    {
                        for (ALPHA_INT bc = 0; bc < bs; bc++)
                        {
                            alpha_madde(scratch[tid][bs * A->col_indx[ai] + bc],
                                        A->values[ai * bs2 + br * bs + bc],
                                        x[bs * r + br]);
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            for (ALPHA_INT r = row_beg; r < row_end; r++)
            {
                for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
                {
                    /* same mapping, column-major intra-block storage */
                    for (ALPHA_INT bc = 0; bc < bs; bc++)
                    {
                        for (ALPHA_INT br = 0; br < bs; br++)
                        {
                            alpha_madde(scratch[tid][bs * A->col_indx[ai] + bc],
                                        A->values[ai * bs2 + bc * bs + br],
                                        x[bs * r + br]);
                        }
                    }
                }
            }
        }
    }

    /* reduce the per-thread buffers into y: y[i] = beta*y[i] + alpha*sum */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < n_inner * bs; ++i)
    {
        ALPHA_Number acc;
        alpha_setzero(acc);
        for (ALPHA_INT t = 0; t < thread_num; ++t)
        {
            alpha_add(acc, acc, scratch[t][i]);   /* acc += scratch[t][i] */
        }
        alpha_mul(y[i], y[i], beta);              /* y[i] *= beta         */
        alpha_madde(y[i], acc, alpha);            /* y[i] += alpha * acc  */
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT t = 0; t < thread_num; ++t)
    {
        free(scratch[t]);
    }
    free(scratch);

    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: dispatches to the OpenMP transposed BSR kernel. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_BSR* A,
                           const ALPHA_Number* x,
                           const ALPHA_Number beta,
                           ALPHA_Number* y)
{
    return gemv_bsr_trans_omp(alpha, A, x, beta, y);
}
/* ===== structural_variation.c ===== */
#include "valorconfig.h"
#include "structural_variation.h"
#include <stdio.h>
#include <stdlib.h>   /* abs, exit */
#include <string.h>   /* memset */
#include "progress.h"
#include "sonic/sonic.h"
#include "cnv.h"

/* Print one SV call as a tab-separated record: chromosome, the eight
 * breakpoint coordinates of the AB and CD split molecules, both support
 * counts, and the SV type name. */
void sv_fprint(FILE *stream, int chr, sv_t *t){
    fprintf(stream,"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\n",
            chr,
            t->AB.start1, t->AB.end1, t->AB.start2, t->AB.end2,
            t->CD.start1, t->CD.end1, t->CD.start2, t->CD.end2,
            t->supports[0], t->supports[1],
            sv_type_name(t->type));
}

/* Lexicographic comparison of two sv_t values over the eight interval
 * coordinates (AB first, then CD).  memcmp convention: <0, 0, >0.
 *
 * BUG FIX: the guards for AB.start1 and CD.start1 previously compared
 * s1->...start1 against s2->...start2 (copy-paste error), so two SVs
 * that differ only in start1 could compare as equal, and later fields
 * could be skipped. */
int _svcmp(const void *v1, const void *v2, size_t size){
    const sv_t *s1 = v1;
    const sv_t *s2 = v2;
    if(s1->AB.start1 != s2->AB.start1){
        return s1->AB.start1 - s2->AB.start1;
    }
    if(s1->AB.end1 != s2->AB.end1){
        return s1->AB.end1 - s2->AB.end1;
    }
    if(s1->AB.start2 != s2->AB.start2){
        return s1->AB.start2 - s2->AB.start2;
    }
    if(s1->AB.end2 != s2->AB.end2){
        return s1->AB.end2 - s2->AB.end2;
    }
    if(s1->CD.start1 != s2->CD.start1){
        return s1->CD.start1 - s2->CD.start1;
    }
    if(s1->CD.end1 != s2->CD.end1){
        return s1->CD.end1 - s2->CD.end1;
    }
    if(s1->CD.start2 != s2->CD.start2){
        return s1->CD.start2 - s2->CD.start2;
    }
    if(s1->CD.end2 != s2->CD.end2){
        return s1->CD.end2 - s2->CD.end2;
    }
    return 0;
}

/* Sized-comparator adapter around sv_comp; the size argument is ignored. */
int sv_compd(const void *v1, const void *v2, size_t val){
    return sv_comp(v1,v2);
}

/* Same ordering as _svcmp, but for qsort-style use on arrays of sv_t*
 * (arguments are pointers to pointers).  Same copy-paste fix applied to
 * the AB.start1 and CD.start1 guards. */
int sv_comp(const void *v1, const void *v2){
    sv_t *s1 = *(void **)v1;
    sv_t *s2 = *(void **)v2;
    if(s1->AB.start1 != s2->AB.start1){
        return s1->AB.start1 - s2->AB.start1;
    }
    if(s1->AB.end1 != s2->AB.end1){
        return s1->AB.end1 - s2->AB.end1;
    }
    if(s1->AB.start2 != s2->AB.start2){
        return s1->AB.start2 - s2->AB.start2;
    }
    if(s1->AB.end2 != s2->AB.end2){
        return s1->AB.end2 - s2->AB.end2;
    }
    if(s1->CD.start1 != s2->CD.start1){
        return s1->CD.start1 - s2->CD.start1;
    }
    if(s1->CD.end1 != s2->CD.end1){
        return s1->CD.end1 - s2->CD.end1;
    }
    if(s1->CD.start2 != s2->CD.start2){
        return s1->CD.start2 - s2->CD.start2;
    }
    if(s1->CD.end2 != s2->CD.end2){
        return s1->CD.end2 - s2->CD.end2;
    }
    return 0;
}

//TODO consider barcode
/* memcmp-convention equality check: returns 0 when the two SVs are
 * identical (all eight coordinates, both barcodes and the type),
 * nonzero otherwise. */
int sv_equals(const void *i1, const void *i2){
    const sv_t *ii1 = i1;
    const sv_t *ii2 = i2;
    return !(ii1->AB.start1 == ii2->AB.start1 &&
             ii1->AB.start2 == ii2->AB.start2 &&
             ii1->AB.end1 == ii2->AB.end1 &&
             ii1->AB.end2 == ii2->AB.end2 &&
             ii1->CD.start1 == ii2->CD.start1 &&
             ii1->CD.start2 == ii2->CD.start2 &&
             ii1->CD.end1 == ii2->CD.end1 &&
             ii1->CD.end2 == ii2->CD.end2 &&
             ii1->AB.barcode == ii2->AB.barcode &&
             ii1->CD.barcode == ii2->CD.barcode &&
             ii1->type == ii2->type);
    // return memcmp(&(ii1->AB),&(ii2->AB),sizeof(splitmolecule_t)) &&
    //        memcmp(&(ii1->CD),&(ii2->CD),sizeof(splitmolecule_t));
}

/* DFS step: mark sv as covered, append it to comp, recurse into every
 * uncovered neighbour in the graph. */
void sv_g_dfs_step(graph_t *g, vector_t *comp, sv_t *sv){
    sv->covered = 1;
    vector_put(comp,sv);
    vector_t *edges = graph_get_edges(g,sv);
    int i;
    for(i=0;i<edges->size;i++){
        sv_t **val = vector_get(edges,i);
        if(!(*val)->covered){
            sv_g_dfs_step(g,comp,*val);
        }
    }
}

/* Return the connected components of the SV overlap graph as a vector of
 * vectors of sv_t, found by repeated DFS over uncovered nodes. */
vector_t *sv_g_dfs_components(graph_t *g){
    adjlist_t *al = graph_to_al(g);
    vector_t *comps = vector_init(sizeof(vector_t),16);
    comps->rmv = &vector_free;
    int i;
    for(i=0;i<al->size;i++){
        sv_t *sv = al_get_value(al,i);
        if( !sv->covered){
            vector_t *comp = vector_init(sizeof(sv_t),40);
            comp->rmv = &sv_destroy;
            sv_g_dfs_step(g,comp,sv);
            vector_soft_put(comps,comp);
        }
    }
    vector_free(al);
    return comps;
}

#define BIG_PRIME 1300501

/* Multiplicative hash over every identifying field of the SV (barcodes,
 * the eight coordinates, and the type), reduced modulo the table size
 * after each mixing step. */
size_t sv_hf(hashtable_t *table, const void *vsv){
    const sv_t *sv = vsv;
    size_t hash = 0;
    hash+=sv->AB.barcode; hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->CD.barcode; hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->AB.start1;  hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->AB.start2;  hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->AB.end1;    hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->AB.end2;    hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->CD.start1;  hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->CD.start2;  hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->CD.end1;    hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->CD.end2;    hash*=BIG_PRIME; hash= hash % table->size;
    hash+=sv->type;       hash*=BIG_PRIME; hash= hash % table->size;
    return hash;
    //return SuperFastHash((vsv),2*sizeof(splitmolecule_t)) % table->size;
}

/* Decide whether two split molecules, with s2's read-pair orientation
 * inverted relative to s1's, are consistent with an inverted duplication.
 * Returns DUP_BACK_COPY / DUP_FORW_COPY for the copy direction, 0 if the
 * geometry does not match. */
int splitmolecule_indicates_inverted_duplication(splitmolecule_t s1, splitmolecule_t s2){
    if( !(s1.start1 < s2.start1 &&
          s1.end1 < s2.end1 &&
          s1.start2 > s2.start2 &&
          s1.end2 > s2.end2)){
        return 0;
    }
    interval_10X A = (interval_10X){s1.start1,s1.end1,s1.barcode};
    interval_10X B = (interval_10X){s1.start2,s1.end2,s1.barcode};
    interval_10X C = (interval_10X){s2.start1,s2.end1,s2.barcode};
    interval_10X D = (interval_10X){s2.start2,s2.end2,s2.barcode};
    if(interval_inner_distance(A,C) < DUP_GAP &&
       interval_inner_distance(A,C) > DUP_OVERLAP &&
       interval_outer_distance(B,D) < DUP_MAX_SIZE &&
       interval_outer_distance(B,D) > DUP_MIN_SIZE){
        return DUP_BACK_COPY;
    }
    else if(interval_inner_distance(B,D) < DUP_GAP &&
            interval_inner_distance(B,D) > DUP_OVERLAP &&
            interval_outer_distance(A,C) < DUP_MAX_SIZE &&
            interval_outer_distance(A,C) > DUP_MIN_SIZE){
        return DUP_FORW_COPY;
    }
    return 0;
}

//TODO Sanity Check
/* Same test as above for a direct (non-inverted) duplication: both ends
 * of s2 are shifted in the same direction relative to s1. */
int splitmolecule_indicates_duplication(splitmolecule_t s1, splitmolecule_t s2){
    if( !(s1.start1 < s2.start1 &&
          s1.end1 < s2.end1 &&
          s1.start2 < s2.start2 &&
          s1.end2 < s2.end2)){
        return 0;
    }
    interval_10X A = (interval_10X){s1.start1,s1.end1,s1.barcode};
    interval_10X B = (interval_10X){s1.start2,s1.end2,s1.barcode};
    interval_10X C = (interval_10X){s2.start1,s2.end1,s2.barcode};
    interval_10X D = (interval_10X){s2.start2,s2.end2,s2.barcode};
    if(interval_inner_distance(A,C) < DUP_GAP &&
       interval_inner_distance(A,C) > DUP_OVERLAP &&
       interval_outer_distance(B,D) < DUP_MAX_SIZE &&
       interval_outer_distance(B,D) > DUP_MIN_SIZE){
        return DUP_BACK_COPY;
    }
    else if(interval_inner_distance(B,D) < DUP_GAP &&
            interval_inner_distance(B,D) > DUP_OVERLAP &&
            interval_outer_distance(A,C) < DUP_MAX_SIZE &&
            interval_outer_distance(A,C) > DUP_MIN_SIZE){
        return DUP_FORW_COPY;
    }
    return 0;
}

//TODO make sure different barcode
/* True when the pair of split molecules is consistent with an inversion:
 * overall size below INV_MAX_SIZE and both breakpoint gaps within
 * (INV_OVERLAP, INV_GAP). */
int splitmolecule_indicates_inversion(splitmolecule_t s1, splitmolecule_t s2){
    if( abs(s2.end2-s1.start1)> INV_MAX_SIZE){ return 0;}
    return s1.start1 < s2.start1 &&
        s1.end1 < s2.end1 &&
        s1.start2 < s2.start2 &&
        s1.end2 < s2.end2 &&
        i_distance(s2.start1,s1.start1,s2.end1,s1.end1) > INV_OVERLAP &&
        i_distance(s2.start2,s1.start2,s2.end2,s1.end2) > INV_OVERLAP &&
        i_distance(s2.start1,s1.start1,s2.end1,s1.end1) < INV_GAP &&
        i_distance(s2.start2,s1.start2,s2.end2,s1.end2) < INV_GAP;
}

/* Dispatch the appropriate geometry test for the given SV type.
 * Exits on an unknown type ordinal. */
int splitmolecule_indicates_sv(splitmolecule_t *s1, splitmolecule_t *s2, sv_type type){
    switch(type){
    case SV_INVERSION:
        return splitmolecule_indicates_inversion(*s1,*s2);
    case SV_DIRECT_DUPLICATION:
        return splitmolecule_indicates_duplication(*s1,*s2);
    case SV_INVERTED_DUPLICATION:
        return splitmolecule_indicates_inverted_duplication(*s1,*s2);
    case SV_TRANSLOCATION:
        return splitmolecule_indicates_duplication(*s1,*s2);
    case SV_INVERTED_TRANSLOCATION:
        return splitmolecule_indicates_inverted_duplication(*s1,*s2);
    default:
        fprintf(stderr,"Unknown SV type ordinal %d\n",type);
        VALOR_LOG("Unknown SV type ordinal %d\n",type);
        exit(-1);
    }
    return 0;
}

/* Clear the per-run bookkeeping flags of an SV. */
void sv_reset(sv_t *sv){
    sv->covered = 0;
    sv->tabu = 0;
    sv->dv = 0;
    sv->inactive = 0;
}

/* Allocate and zero-initialize an sv_t from one or two split molecules;
 * sc2 may be NULL for SV types evidenced by a single split molecule. */
sv_t *sv_init(splitmolecule_t *sc1,splitmolecule_t *sc2,sv_type type){
    sv_t *new_i = getMem(sizeof(sv_t));
    memset(new_i,0,sizeof(sv_t));
    new_i->supports[0] = 1;//TODO fix this
    new_i->supports[1] = 1;//TODO fix this
    new_i->covered = 0;
    new_i->tabu = 0;
    new_i->dv = 0;
    new_i->inactive = 0;
    new_i->AB=*sc1;
    if(sc2!=NULL){//for sv's with 1 split molecule
        new_i->CD=*sc2;
    }
    else{
        new_i->CD=(splitmolecule_t){0,0,0,0,0L};
    }
    new_i->type=type;
    return new_i;
}

/* Release an sv_t created by sv_init. */
void sv_destroy(void *sv){
    freeMem(sv,sizeof(sv_t));
}

/* Overlap test for two inversion calls: compare the [AB.end, CD.start]
 * spans on both sides within CLONE_MEAN slack. */
int inversion_overlaps(sv_t *i1, sv_t *i2){
    return interval_pair_overlaps(
        &(splitmolecule_t){
            MIN(i1->AB.end1,i1->CD.start1),MAX(i1->AB.end1,i1->CD.start1),
            MIN(i1->AB.end2,i1->CD.start2),MAX(i1->AB.end2,i1->CD.start2)
        },
        &(splitmolecule_t){
            MIN(i2->AB.end1,i2->CD.start1),MAX(i2->AB.end1,i2->CD.start1),
            MIN(i2->AB.end2,i2->CD.start2),MAX(i2->AB.end2,i2->CD.start2)
        },CLONE_MEAN);
}

//TODO change this if it needs change
/* Overlap test for duplication-like calls; currently identical to the
 * inversion test. */
int duplication_overlaps(sv_t *i1, sv_t *i2){
    return interval_pair_overlaps(
        &(splitmolecule_t){
            MIN(i1->AB.end1,i1->CD.start1),MAX(i1->AB.end1,i1->CD.start1),
            MIN(i1->AB.end2,i1->CD.start2),MAX(i1->AB.end2,i1->CD.start2)
        },
        &(splitmolecule_t){
            MIN(i2->AB.end1,i2->CD.start1),MAX(i2->AB.end1,i2->CD.start1),
            MIN(i2->AB.end2,i2->CD.start2),MAX(i2->AB.end2,i2->CD.start2)
        },CLONE_MEAN);
}

/* Tandem duplications are compared on their AB split molecule only. */
int tandem_duplication_overlaps(sv_t *i1, sv_t *i2){
    return interval_pair_overlaps( &(i1->AB),&(i2->AB) ,CLONE_MEAN);
}

/* Deletions are compared on their AB split molecule only. */
int deletion_overlaps(sv_t *i1, sv_t *i2){
    return interval_pair_overlaps( &(i1->AB),&(i2->AB) ,CLONE_MEAN);
}

/* Type-aware overlap test between two SVs of the same type; SVs of
 * different types never overlap.  Exits on an unknown type ordinal. */
int sv_overlaps(sv_t *i1, sv_t *i2){
    if(i1->type!=i2->type){ return 0;}
    switch(i1->type){
    case SV_INVERSION:
        return inversion_overlaps(i1,i2);
    case SV_DIRECT_DUPLICATION:
        return duplication_overlaps(i1,i2);
    case SV_INVERTED_DUPLICATION:
        return duplication_overlaps(i1,i2);
    case SV_TANDEM_DUPLICATION:
        return tandem_duplication_overlaps(i1,i2);
    case SV_TRANSLOCATION:
        return duplication_overlaps(i1,i2);
    case SV_INVERTED_TRANSLOCATION:
        return duplication_overlaps(i1,i2);
    case SV_DELETION:
        return deletion_overlaps(i1,i2);
    default:
        fprintf(stderr,"Unknown SV type ordinal %d\n",i1->type);
        VALOR_LOG("Unknown SV type ordinal %d\n",i1->type);
        exit(-1);
    }
    return 0;
}

#define MAGIC_NODE_MARKER -126
/*
 *
 * This removes every element in the `items` from `g`
 * Nodes in the `items` shouldn't have any edges outside of the `component`
 *
 * Achtung:
 * sv->dv shouldn't be -126.
Make sure it is not -126 outside of this function * Make sure `g`->hf does not use sv->dv * Make sure `g`->key_cmp does not use sv->dv * (default is memcmp(a,b,sizeof(sv_t)), so Make sure it is changed) */ int g_remove_all(graph_t *g, vector_t *component,vector_t *items){ int i,j; for(i=0;i<items->size;i++){ void *item = vector_get(items,i); // graph_remove_node(g,item,G_REMOVE_SOFT); sv_t *sv = ht_get(g,item)->key; sv->dv = MAGIC_NODE_MARKER; //Hax } for(i=0;i<component->size;i++){ vector_t *edges = graph_get_edges(g,vector_get(component,i)); if(edges==NULL){continue;} edges->REMOVE_POLICY = REMP_LAZY; for(j=0;j<edges->size;j++){ sv_t **ptr = vector_get(edges,j); if((*ptr)->dv==MAGIC_NODE_MARKER){ vector_remove(edges,j); } } vector_defragment(edges); edges->REMOVE_POLICY = REMP_FAST; } for(i=0;i<items->size;i++){ void *item = vector_get(items,i); graph_remove_node(g,item,G_REMOVE_SOFT); } component->REMOVE_POLICY = REMP_LAZY; for(i=0;i<component->size;i++){ void *ptr = vector_get(component,i); if(!graph_have_node(g,ptr)){ vector_remove(component,i); } } vector_defragment(component); component->REMOVE_POLICY = REMP_SORTED; return 0; } void sv_graph_reset(graph_t *g){ int i,j; for(i=0;i<g->size;i++){ for(j=0;j<g->buckets[i]->size;j++){ pair_t *pair = vector_get(g->buckets[i],j); sv_reset(pair->key); } } } graph_t *make_sv_graph(vector_t *svs){ graph_t *g = graph_init(svs->size * 2, sizeof(sv_t)); g->hf = &sv_hf; g->key_cmp = &_svcmp; int i,j; for(i=0;i<svs->size;i++){ graph_put_node(g,vector_get(svs,i)); } for(i=0;i<svs->size;i++){ sv_t *a = vector_get(svs,i); for(j=i+1;j<svs->size;j++){ sv_t *b = vector_get(svs,j); if(sv_overlaps(a,b)){ graph_put_edge(g,a,b); graph_put_edge(g,b,a); } } } return g; } #define SV_INIT_LIMIT 10000 vector_t *find_svs(vector_t *split_molecules, sv_type type, int chr){ int i,j,k; vector_t *svs; int num_threads = 1; #ifdef _OPENMP num_threads = omp_get_num_threads(); #endif if(num_threads == 1){ svs = 
vector_init(sizeof(sv_t),SV_INIT_LIMIT); splitmolecule_t *AB; splitmolecule_t *CD; for(i=0;i<split_molecules->size;i++){ AB = vector_get(split_molecules,i); if(type&SV_DELETION){ sv_t *tmp = sv_init(AB,NULL,SV_DELETION); tmp->chr = chr; vector_soft_put(svs,tmp); } if(type&SV_TANDEM_DUPLICATION){ sv_t *tmp = sv_init(AB,NULL,SV_TANDEM_DUPLICATION); tmp->chr = chr; vector_soft_put(svs,tmp); } if(!(type & ( SV_INVERSION | SV_DIRECT_DUPLICATION | SV_INVERTED_DUPLICATION | SV_TRANSLOCATION | SV_INVERTED_TRANSLOCATION))){ continue;} for(j=0;j<split_molecules->size;j++){ if(i==j){continue;} CD = vector_get(split_molecules,j); for(k=SV_INVERSION;k<SV_MAX_ID;k=k<<1){ if((k&type)==0){ continue;} char orient = splitmolecule_indicates_sv(AB,CD,k); if(orient){ sv_t *tmp = sv_init(AB,CD,k); tmp->orientation = orient; tmp->chr = chr; vector_soft_put(svs,tmp); } } } } } else{ vector_t **osvs = malloc(sizeof(vector_t *) * num_threads); for(i=0;i < num_threads;i++){ osvs[i]=vector_init(sizeof(sv_t),SV_INIT_LIMIT); } #pragma omp parallel for for(i=0;i<split_molecules->size;i++){ splitmolecule_t *AB = vector_get(split_molecules,i); if(type & SV_DELETION){ sv_t *tmp = sv_init(AB,NULL,SV_DELETION); tmp->chr = chr; vector_soft_put(osvs[omp_get_thread_num()],tmp); } if(type&SV_TANDEM_DUPLICATION){ sv_t *tmp = sv_init(AB,NULL,SV_TANDEM_DUPLICATION); tmp->chr = chr; vector_soft_put(osvs[omp_get_thread_num()],tmp); } if(!(type & ( SV_INVERSION | SV_DIRECT_DUPLICATION | SV_INVERTED_DUPLICATION| SV_TRANSLOCATION | SV_INVERTED_TRANSLOCATION))){ continue;} for(j=0;j<split_molecules->size;j++){ if(i==j){continue;} splitmolecule_t *CD = vector_get(split_molecules,j); for(k=SV_INVERSION;k<SV_MAX_ID;k=k<<1){ if((k&type)==0){ continue;} char orient = splitmolecule_indicates_sv(AB,CD,type); if(orient){ sv_t *tmp = sv_init(AB,CD,type); tmp->orientation = orient; tmp->chr = chr; vector_soft_put(osvs[omp_get_thread_num()],tmp); } } } } size_t vsize = 0; for(i=0;i<num_threads;i++){ vsize+=osvs[i]->size; } 
svs = vector_init(sizeof(sv_t),vsize); for(i=0;i<num_threads;i++){ for(j=0;j<osvs[i]->size;j++){ vector_soft_put(svs,vector_get(osvs[i],j)); } vector_tabularasa(osvs[i]); vector_free(osvs[i]); } free(osvs); } return svs; } size_t scl_binary_searchdup(vector_t *intervals, splitmolecule_t *key){ if(intervals->size == 0){return -1;} long first, last; long mid = 0; first =0; last = intervals->size - 1; int counter = 0; while( first < last){ mid = (first + last)/2; if(IDIS_VECTOR_GET(intervals,mid)->end1 < key->start1){ first = mid + 1; } else{ last = mid - 1; } counter ++; } return mid; } void update_tandem_duplication_supports_b(sv_t *dup, vector_t *mp_reads){ int j; int mp_support = 0; int midAB = scl_binary_search(mp_reads,&(dup->AB)); for(j=midAB;j<mp_reads->size;j++){ if(interval_pair_overlaps(&(dup->AB),vector_get(mp_reads,j),MAX_FRAG_SIZE)){ mp_support++; } if(dup->AB.end1 < IDIS_VECTOR_GET(mp_reads,j)->start1){ break; } if(mp_support > MAX_SUPPORT){ break;} } dup->supports[1] = mp_support; dup->supports[0] = mp_support; } void update_deletion_supports_b(sv_t *del, vector_t *pm_reads){ int j; int pm_support; int midAB; pm_support = 0; midAB = scl_binary_search(pm_reads,&(del->AB)); for(j=midAB;j<pm_reads->size;j++){ if(interval_pair_overlaps(&(del->AB),vector_get(pm_reads,j),MAX_FRAG_SIZE)){//CLONE_MEAN)){ pm_support++; } if(del->AB.end1 < IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } del->supports[1] = pm_support; del->supports[0] = pm_support; } void update_deletion_supports(vector_t *dels, vector_t *pm_reads){ int i,j; int pm_support; int midAB; qsort(pm_reads->items,pm_reads->size,sizeof(interval_discordant *),interval_pair_comp); for(i=0;i<dels->size;i++){ pm_support = 0; midAB = scl_binary_search(pm_reads,&(SV_VECTOR_GET(dels,i)->AB)); for(j=midAB;j<pm_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(dels,i)->AB),vector_get(pm_reads,j),CLONE_MEAN)){ pm_support++; } if(SV_VECTOR_GET(dels,i)->AB.end1 < 
IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } SV_VECTOR_GET(dels,i)->supports[1] = pm_support; SV_VECTOR_GET(dels,i)->supports[0] = pm_support; } } void update_duplication_supports_b(sv_t *dup, vector_t *pm_reads, vector_t *mp_reads){ int j; int pm_support; int mp_support; int midAB; int midCD; pm_support = 0; mp_support = 0; if(dup->orientation == DUP_FORW_COPY){ midAB = scl_binary_search(pm_reads,&(dup->CD)); midCD = scl_binary_search(mp_reads,&(dup->AB)); for(j=midAB;j<pm_reads->size;j++){ // printf("midAB: %d, j %d",midAB,j); if(interval_pair_overlaps(&(dup->CD),vector_get(pm_reads,j),CLONE_MEAN)){ pm_support++; } // printf("\n"); if(dup->CD.end1 < IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mp_reads->size;j++){ if(interval_pair_overlaps(&(dup->AB),vector_get(mp_reads,j),CLONE_MEAN)){ mp_support++; } if(dup->AB.end1 < IDIS_VECTOR_GET(mp_reads,j)->start1){ break; } if(mp_support > MAX_SUPPORT){ break;} } } else if(dup->orientation == DUP_BACK_COPY){ midAB = scl_binary_search(pm_reads,&(dup->AB)); midCD = scl_binary_search(mp_reads,&(dup->CD)); for(j=midAB;j<pm_reads->size;j++){ if(interval_pair_overlaps(&(dup->AB),vector_get(pm_reads,j),CLONE_MEAN)){ pm_support++; } if(dup->AB.end1 < IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mp_reads->size;j++){ if(interval_pair_overlaps(&(dup->CD),vector_get(mp_reads,j),CLONE_MEAN)){ mp_support++; } if(dup->CD.end1 < IDIS_VECTOR_GET(mp_reads,j)->start1){ break; } if(mp_support > MAX_SUPPORT){ break;} } } dup->supports[0] = mp_support; dup->supports[1] = pm_support; } void update_duplication_supports(vector_t *dups, vector_t *pm_reads, vector_t *mp_reads){ int i,j; int pm_support; int mp_support; int midAB; int midCD; qsort(pm_reads->items,pm_reads->size,sizeof(interval_discordant *),interval_pair_comp); qsort(mp_reads->items,mp_reads->size,sizeof(interval_discordant 
*),interval_pair_comp); for(i=0;i<dups->size;i++){ pm_support = 0; mp_support = 0; if(SV_VECTOR_GET(dups,i)->orientation == DUP_FORW_COPY){ midAB = scl_binary_search(pm_reads,&(SV_VECTOR_GET(dups,i)->CD)); midCD = scl_binary_search(mp_reads,&(SV_VECTOR_GET(dups,i)->AB)); for(j=midAB;j<pm_reads->size;j++){ // printf("midAB: %d, j %d",midAB,j); if(interval_pair_overlaps(&(SV_VECTOR_GET(dups,i)->CD),vector_get(pm_reads,j),MAX_FRAG_SIZE)){ pm_support++; } // printf("\n"); if(SV_VECTOR_GET(dups,i)->CD.end1 < IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mp_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(dups,i)->AB),vector_get(mp_reads,j),MAX_FRAG_SIZE)){ mp_support++; } if(SV_VECTOR_GET(dups,i)->AB.end1 < IDIS_VECTOR_GET(mp_reads,j)->start1){ break; } if(mp_support > MAX_SUPPORT){ break;} } } else if(SV_VECTOR_GET(dups,i)->orientation == DUP_BACK_COPY){ midAB = scl_binary_search(pm_reads,&(SV_VECTOR_GET(dups,i)->AB)); midCD = scl_binary_search(mp_reads,&(SV_VECTOR_GET(dups,i)->CD)); for(j=midAB;j<pm_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(dups,i)->AB),vector_get(pm_reads,j),MAX_FRAG_SIZE)){ pm_support++; } if(SV_VECTOR_GET(dups,i)->AB.end1 < IDIS_VECTOR_GET(pm_reads,j)->start1){ break; } if(pm_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mp_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(dups,i)->CD),vector_get(mp_reads,j),MAX_FRAG_SIZE)){ mp_support++; } if(SV_VECTOR_GET(dups,i)->CD.end1 < IDIS_VECTOR_GET(mp_reads,j)->start1){ break; } if(mp_support > MAX_SUPPORT){ break;} } } SV_VECTOR_GET(dups,i)->supports[0] = mp_support; SV_VECTOR_GET(dups,i)->supports[1] = pm_support; } } void update_inversion_supports_b(sv_t *inv, vector_t *pp_reads, vector_t *mm_reads){ int j; int pp_support; int mm_support; int midAB; int midCD; pp_support = 0; mm_support = 0; midAB = scl_binary_search(pp_reads,&(inv->AB)); midCD = scl_binary_search(mm_reads,&(inv->CD)); 
for(j=midAB;j<pp_reads->size;j++){ if(interval_pair_overlaps(&(inv->AB),vector_get(pp_reads,j),CLONE_MEAN/2)){ pp_support++; } if(inv->AB.end1 < IDIS_VECTOR_GET(pp_reads,j)->start1){ break; } if(pp_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mm_reads->size;j++){ if(interval_pair_overlaps(&(inv->CD),vector_get(mm_reads,j),CLONE_MEAN/2)){ mm_support++; } if(inv->CD.end1 < IDIS_VECTOR_GET(mm_reads,j)->start1){ break; } if(mm_support > MAX_SUPPORT){ break;} } inv->supports[0] = pp_support; inv->supports[1] = mm_support; } void update_inversion_supports(vector_t *inversions, vector_t *pp_reads, vector_t *mm_reads){ int i,j; int pp_support; int mm_support; int midAB; int midCD; qsort(pp_reads->items,pp_reads->size,sizeof(interval_discordant *),interval_pair_comp); qsort(mm_reads->items,mm_reads->size,sizeof(interval_discordant *),interval_pair_comp); for(i=0;i<inversions->size;i++){ pp_support = 0; mm_support = 0; midAB = scl_binary_search(pp_reads,&(SV_VECTOR_GET(inversions,i)->AB)); midCD = scl_binary_search(mm_reads,&(SV_VECTOR_GET(inversions,i)->CD)); for(j=midAB;j<pp_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(inversions,i)->AB),vector_get(pp_reads,j),CLONE_MEAN)){ pp_support++; } if(SV_VECTOR_GET(inversions,i)->AB.end1 < IDIS_VECTOR_GET(pp_reads,j)->start1){ break; } if(pp_support > MAX_SUPPORT){ break;} } for(j=midCD;j<mm_reads->size;j++){ if(interval_pair_overlaps(&(SV_VECTOR_GET(inversions,i)->CD),vector_get(mm_reads,j),CLONE_MEAN)){ mm_support++; } if(SV_VECTOR_GET(inversions,i)->CD.end1 < IDIS_VECTOR_GET(mm_reads,j)->start1){ break; } if(mm_support > MAX_SUPPORT){ break;} } SV_VECTOR_GET(inversions,i)->supports[0] = pp_support; SV_VECTOR_GET(inversions,i)->supports[1] = mm_support; } } void update_sv_supports_b(vector_t *svs, bam_vector_pack *reads){ int i; qsort(reads->pp_discordants->items,reads->pp_discordants->size,sizeof(interval_discordant *),interval_pair_comp); 
qsort(reads->mm_discordants->items,reads->mm_discordants->size,sizeof(interval_discordant *),interval_pair_comp); qsort(reads->pm_discordants->items,reads->pm_discordants->size,sizeof(interval_discordant *),interval_pair_comp); qsort(reads->mp_discordants->items,reads->mp_discordants->size,sizeof(interval_discordant *),interval_pair_comp); for(i=0;i<svs->size;i++){ sv_t *sv = vector_get(svs,i); switch(sv->type){ case SV_INVERSION: update_inversion_supports_b(sv,reads->pp_discordants,reads->mm_discordants); break; case SV_DIRECT_DUPLICATION: update_duplication_supports_b(sv,reads->pm_discordants,reads->mp_discordants); break; case SV_INVERTED_DUPLICATION: update_duplication_supports_b(sv,reads->pp_discordants,reads->mm_discordants); break; case SV_TANDEM_DUPLICATION: update_tandem_duplication_supports_b(sv,reads->mp_discordants); break; case SV_TRANSLOCATION: update_duplication_supports_b(sv,reads->pm_discordants,reads->mp_discordants); break; case SV_INVERTED_TRANSLOCATION: update_duplication_supports_b(sv,reads->pp_discordants,reads->mm_discordants); break; case SV_DELETION: update_deletion_supports_b(sv,reads->pm_discordants); break; default: fprintf(stderr,"Unknown SV type ordinal %d\n",sv->type); VALOR_LOG("Unknown SV type ordinal %d\n",sv->type); exit(-1); } } } void update_sv_supports(vector_t *svs, bam_vector_pack *reads ,sv_type type){ switch(type){ case SV_INVERSION: update_inversion_supports(svs,reads->pp_discordants,reads->mm_discordants); break; case SV_DIRECT_DUPLICATION: update_duplication_supports(svs,reads->pm_discordants,reads->mp_discordants); break; case SV_INVERTED_DUPLICATION: update_duplication_supports(svs,reads->pp_discordants,reads->mm_discordants); break; case SV_DELETION: update_deletion_supports(svs,reads->pm_discordants); break; case SV_TRANSLOCATION: update_duplication_supports(svs,reads->pm_discordants,reads->mp_discordants); break; case SV_INVERTED_TRANSLOCATION: 
update_duplication_supports(svs,reads->pp_discordants,reads->mm_discordants); break; default: fprintf(stderr,"Unknown SV type ordinal %d\n",type); VALOR_LOG("Unknown SV type ordinal %d\n",type); exit(-1); } } //TODO is this revers? splitmolecule_t *inversion_reduce_breakpoints(sv_t *inv){ splitmolecule_t *sc = getMem(sizeof(splitmolecule_t)); sc->start1 = inv->AB.end1; sc->end1 = inv->CD.start1; sc->start2 = inv->AB.end2; sc->end2 = inv->CD.start2; return sc; } splitmolecule_t *duplication_reduce_breakpoints(sv_t *dup){ splitmolecule_t *sc = getMem(sizeof(splitmolecule_t)); if(dup->orientation == DUP_FORW_COPY){ sc->start1 = dup->AB.start1; sc->end1 = dup->CD.end1; sc->start2 = dup->AB.end2; sc->end2 = dup->CD.start2; } else if(dup->orientation == DUP_BACK_COPY){ sc->start1 = dup->AB.start2; sc->end1 = dup->CD.end2; sc->start2 = dup->AB.end1;; sc->end2 = dup->CD.start1; } return sc; } splitmolecule_t *inverted_duplication_reduce_breakpoints(sv_t *dup){ splitmolecule_t *sc = getMem(sizeof(splitmolecule_t)); if(dup->orientation == DUP_FORW_COPY){ sc->start1 = dup->AB.start1; sc->end1 = dup->CD.end1; sc->start2 = dup->CD.end2; sc->end2 = dup->AB.start2; } else if(dup->orientation == DUP_BACK_COPY){ sc->start2 = dup->AB.end1; sc->end2 = dup->CD.start1; sc->start1 = dup->CD.start2; sc->end1 = dup->AB.end2; } return sc; } splitmolecule_t *tandem_duplication_reduce_breakpoints(sv_t *sv){ splitmolecule_t *sc = getMem(sizeof(splitmolecule_t)); sc->start1 = sv->AB.start1; sc->end1 = sv->AB.end1; sc->start2 = sv->AB.start2; sc->end2 = sv->AB.end2; return sc; } splitmolecule_t *deletion_reduce_breakpoints(sv_t *sv){ splitmolecule_t *sc = getMem(sizeof(splitmolecule_t)); sc->start1 = sv->AB.start1; sc->start2 = sv->AB.start2; sc->end1 = sv->AB.end1; sc->end2 = sv->AB.end2; return sc; } splitmolecule_t *sv_reduce_breakpoints(sv_t *sv){ switch(sv->type){ case SV_INVERSION: return inversion_reduce_breakpoints(sv); case SV_DIRECT_DUPLICATION: return 
duplication_reduce_breakpoints(sv); case SV_INVERTED_DUPLICATION: return inverted_duplication_reduce_breakpoints(sv); case SV_TRANSLOCATION: return duplication_reduce_breakpoints(sv); case SV_INVERTED_TRANSLOCATION: return inverted_duplication_reduce_breakpoints(sv); case SV_DELETION: return deletion_reduce_breakpoints(sv); case SV_TANDEM_DUPLICATION: return tandem_duplication_reduce_breakpoints(sv); default: fprintf(stderr, "SV type of unknown ordinal %d!\n",sv->type); exit(-1); ; } } int inversion_is_proper(sv_t *sv){ sonic *snc = sonic_load(NULL); //bam_info *in_bams = get_bam_info(NULL); parameters *params = get_params(); int chr = sv->chr; fprintf(logFile,"%s\t%d\t%d\t",snc->chromosome_names[chr],sv->AB.start1,sv->CD.end1); double ploidy = params->chr_copy_count[chr]; if( params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],sv->AB.start1,sv->CD.end1)){ fprintf(logFile,"sat 5'\n"); return 0; } if( params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],sv->AB.start2,sv->CD.end2)){ fprintf(logFile,"sat 3'\n"); return 0; } if( sv->supports[0] < INVERSION_MIN_REQUIRED_SUPPORT / ploidy){ fprintf(logFile,"sup 5'\n"); return 0; } if( sv->supports[1] < INVERSION_MIN_REQUIRED_SUPPORT / ploidy){ fprintf(logFile,"sup 3'\n"); return 0; } if(sonic_is_gap(snc, snc->chromosome_names[chr], sv->AB.start1-CLONE_MEAN/2, sv->CD.end1+CLONE_MEAN/2) || sonic_is_gap(snc, snc->chromosome_names[chr], sv->AB.start2-CLONE_MEAN/2, sv->CD.end2+CLONE_MEAN/2)){ fprintf(logFile,"Gap\n"); return 0; } if(sonic_is_gap(snc, snc->chromosome_names[chr], sv->AB.start1, sv->CD.end2)){ int _start = sv->AB.start1; int _end = sv->CD.end2; sonic_interval *interval = sonic_intersect(snc,snc->chromosome_names[chr],_start,_end,SONIC_GAP); if(((double)interval->end-interval->start)/((double)_end-_start)>0.25){ fprintf(logFile,"Gap%%\n"); return 0; } } fprintf( logFile, "Call\n"); return 1; } int invert_duplication_is_proper(sv_t *sv){ parameters *params = 
get_params(); sonic *snc = sonic_load(NULL); bam_info *in_bams = get_bam_info(NULL); int start = -1; int end = -1; int target_start = -1; int target_end = -1; int chr = sv->chr; double ploidy = params->chr_copy_count[chr]; if( sv->supports[0]<DUPLICATION_MIN_REQUIRED_SUPPORT/(ploidy)){ return 0; } if( sv->supports[1]<DUPLICATION_MIN_REQUIRED_SUPPORT/(ploidy)){ return 0; } if(sv->orientation ==DUP_FORW_COPY){ target_start = sv->AB.start2; target_end = sv->CD.end2; start = sv->CD.start1; end = sv->AB.end1; }else if(sv->orientation == DUP_BACK_COPY){ target_start = sv->AB.start1; target_end = sv->CD.end1; start = sv->CD.start2; end = sv->AB.end2; } if(target_start > target_end){ // int temp = target_start; // target_start = target_end; // target_end = temp; int avg = (target_start - target_end)/2 + target_end; target_start = avg - CLONE_MEAN /2; target_end = avg + CLONE_MEAN /2; } int is_ref_dup_source = sonic_is_segmental_duplication(snc,snc->chromosome_names[chr],start,end); int is_ref_dup_target = sonic_is_segmental_duplication(snc,snc->chromosome_names[chr],target_start,target_end); int is_ref_gap_source = params->filter_gap && sonic_is_gap(snc,snc->chromosome_names[chr],start,end); int is_ref_gap_target = params->filter_gap && sonic_is_gap(snc,snc->chromosome_names[chr],target_start,target_end); int is_ref_sat_source = params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],start,end); int is_ref_sat_target = params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],target_start,target_end); int does_cnv_support_dup; // does_cnv_support_dup= get_depth_region(in_bams->depths[chr],start,end) > (3.0/ploidy) * in_bams->depth_mean[chr] - (3.0/ploidy) * in_bams->depth_std[chr]; does_cnv_support_dup= get_depth_region(in_bams->depths[chr],start,end) > in_bams->depth_mean[chr] + (3.0/ploidy) * in_bams->depth_std[chr]; return !((is_ref_dup_source && is_ref_dup_target) || !does_cnv_support_dup || is_ref_gap_source || 
is_ref_gap_target || is_ref_sat_source || is_ref_sat_target); } int direct_duplication_is_proper(sv_t *sv){ parameters *params = get_params(); sonic *snc = sonic_load(NULL); bam_info *in_bams = get_bam_info(NULL); int start = -1; int end = -1; int target_start = -1; int target_end = -1; int chr = sv->chr; double ploidy = params->chr_copy_count[chr]; if( sv->supports[0]<DUPLICATION_MIN_REQUIRED_SUPPORT/(ploidy)){ return 0; } if( sv->supports[1]<DUPLICATION_MIN_REQUIRED_SUPPORT/( ploidy)){ return 0; } if(sv->orientation ==DUP_FORW_COPY){ target_start = sv->AB.end2; target_end = sv->CD.start2; start = sv->AB.start1; end = sv->CD.end1; }else if(sv->orientation == DUP_BACK_COPY){ target_start = sv->AB.end1; target_end = sv->CD.start1; start = sv->AB.start2; end = sv->CD.end2; } if(target_start > target_end){ // int temp = target_start; // target_start = target_end; // target_end = temp; int avg = (target_start - target_end)/2 + target_end; target_start = avg - CLONE_MEAN /2; target_end = avg + CLONE_MEAN /2; } int is_ref_dup_source = sonic_is_segmental_duplication(snc,snc->chromosome_names[chr],start,end); int is_ref_dup_target = sonic_is_segmental_duplication(snc,snc->chromosome_names[chr],target_start,target_end); int is_ref_gap_source = params->filter_gap && sonic_is_gap(snc,snc->chromosome_names[chr],start,end); int is_ref_gap_target = params->filter_gap && sonic_is_gap(snc,snc->chromosome_names[chr],target_start,target_end); int is_ref_sat_source = params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],start,end); int is_ref_sat_target = params->filter_satellite && sonic_is_satellite(snc,snc->chromosome_names[chr],target_start,target_end); int does_cnv_support_dup= get_depth_region(in_bams->depths[chr],start,end) > in_bams->depth_mean[chr] + (3.0/ploidy) * in_bams->depth_std[chr]; // int does_cnv_support_dup= get_depth_region(in_bams->depths[chr],start,end) > (3.0/ploidy) * in_bams->depth_mean[chr] - (3.0/ploidy) * in_bams->depth_std[chr]; 
fprintf(logFile,"%s\t%d\t%d\t%s\t%d\t%d\t%s\n", snc->chromosome_names[chr],start,end, snc->chromosome_names[chr],target_start,target_end, sv_type_name( sv->type)); fprintf(logFile,"%lf\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n" ,get_depth_region(in_bams->depths[chr],start,end),does_cnv_support_dup,is_ref_dup_source, is_ref_dup_target, is_ref_gap_source, is_ref_gap_target, is_ref_sat_source, is_ref_sat_target); return !((is_ref_dup_source && is_ref_dup_target) || !does_cnv_support_dup || is_ref_gap_source || is_ref_gap_target || is_ref_sat_source || is_ref_sat_target); } int deletion_is_proper(sv_t *sv){ sonic *snc = sonic_load(NULL); bam_info *in_bams = get_bam_info(NULL); parameters *params = get_params(); int chr = sv->chr; double ploidy = params->chr_copy_count[chr]; if( params->filter_gap && sonic_is_gap(snc,snc->chromosome_names[chr], sv->AB.start1, sv->AB.end2)){ return 0; } if( sv->supports[0] < DELETION_MIN_REQUIRED_SUPPORT/ploidy){ return 0; } if( get_depth_region(in_bams->depths[chr],sv->AB.end1,sv->AB.start2) > (1 - 1.0 /ploidy) * in_bams->depth_mean[chr] + (3.0 / ploidy) * in_bams->depth_std[chr]){ return 0; } return 1; } int sv_is_proper(void *vsv){ sv_t *sv = vsv; switch(sv->type){ case SV_DELETION: return deletion_is_proper(sv); case SV_INVERSION: return inversion_is_proper(sv); case SV_DIRECT_DUPLICATION: return direct_duplication_is_proper(sv); case SV_INVERTED_DUPLICATION: return invert_duplication_is_proper(sv); default: fprintf(stderr,"Sv type with ordinal %d is not implemented", (int)sv->type); exit(-1); } }
full_matrix.h
/* Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of VSB - Technical University of Ostrava and Graz University of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file full_matrix.h * @brief */ #ifndef INCLUDE_BESTHEA_FULL_MATRIX_H_ #define INCLUDE_BESTHEA_FULL_MATRIX_H_ #include "besthea/matrix.h" #include "besthea/settings.h" #include <iostream> #include <mkl.h> #include <vector> namespace besthea { namespace linear_algebra { class full_matrix; } } /** * Class representing a full matrix. 
*/ class besthea::linear_algebra::full_matrix : public besthea::linear_algebra::matrix { public: using vector_type = besthea::linear_algebra::vector; //!< Vector type. /** * Default constructor. */ full_matrix( ); /** * Copy constructor. * @param[in] that Matrix to be deep copied. */ full_matrix( const full_matrix & that ); /** * Constructor with an initializer list. * @param[in] n_rows Number of rows. * @param[in] n_columns Number of columns. * @param[in] list Initializer list for std::vector. */ full_matrix( lo n_rows, lo n_columns, std::initializer_list< sc > list ); /** * Constructing a matrix of the given dimension. * @param[in] n_rows Number of rows. * @param[in] n_columns Number of columns. * @param[in] zero Initialize to 0 if true. */ full_matrix( lo n_rows, lo n_columns, bool zero = true ); /** * Destructor. */ virtual ~full_matrix( ); /*! * @brief Prints the matrix. * @param[in] stream */ void print( std::ostream & stream = std::cout ) const; /*! * @brief Fills the matrix with the given value. * @param[in] value */ void fill( sc value ) { std::fill( _data.begin( ), _data.end( ), value ); } /*! * @brief Fills the diagonal of the matrix with the given value. * @param[in] value */ void fill_diag( sc value ); /*! * @brief Fills the matrix with random numbers (uniform distribution). * @param[in] lower Lower bound. * @param[in] upper Upper bound. */ void random_fill( sc lower, sc upper ); /*! * @brief Fills the matrix diagonal with random numbers (uniform * distribution). * @param[in] lower Lower bound. * @param[in] upper Upper bound. */ void random_fill_diag( sc lower, sc upper ); /*! * @brief Returns the (i,j)-th element of the matrix. * @param[in] i Row index. * @param[in] j Column index. */ sc get( lo i, lo j ) const { return _data[ i + j * _n_rows ]; } /*! * @brief Sets the (i,j)-th element of the matrix. * @param[in] i Row index. * @param[in] j Column index. * @param[in] value Value to be set. 
*/ void set( lo i, lo j, sc value ) { _data[ i + j * _n_rows ] = value; } /*! * @brief Adds value to the (i,j)-th element of the matrix. * @param[in] i Row index. * @param[in] j Column index. * @param[in] value Value to be set. */ void add( lo i, lo j, sc value ) { _data[ i + j * _n_rows ] += value; } /*! * @brief Atomically adds value to the (i,j)-th element of the matrix. * @param[in] i Row index. * @param[in] j Column index. * @param[in] value Value to be set. */ void add_atomic( lo i, lo j, sc value ) { #pragma omp atomic update _data[ i + j * _n_rows ] += value; } /*! * @brief Overloads the () operator. * @param[in] i Row index. * @param[in] j Column index. */ sc & operator( )( lo i, lo j ) { return _data[ i + j * _n_rows ]; } /*! * @brief Overloads the () operator. * @param[in] i Row index. * @param[in] j Column index. */ sc operator( )( lo i, lo j ) const { return _data[ i + j * _n_rows ]; } /*! * @brief Returns the raw data. */ sc * data( ) { return _data.data( ); } /*! * @brief Returns the raw data. */ const sc * data( ) const { return _data.data( ); } /*! * @brief y = beta * y + alpha * (this)^trans * x. * @param[in] x * @param[in,out] y * @param[in] trans Flag for transpose. * @param[in] alpha * @param[in] beta */ virtual void apply( const vector_type & x, vector_type & y, bool trans = false, sc alpha = 1.0, sc beta = 0.0 ) const; /*! * @brief y = beta * y + alpha * this * x. * @param[in] x * @param[in,out] y * @param[in] alpha * @param[in] beta */ void apply_symmetric( vector const & x, vector_type & y, sc alpha = 1.0, sc beta = 0.0 ) const; /*! * @brief C = alpha * A * B + beta * C, where C is this matrix * @param[in] A * @param[in] B * @param[in] trans_A * @param[in] trans_B * @param[in] alpha * @param[in] beta */ void multiply( full_matrix const & A, full_matrix const & B, bool trans_A = false, bool trans_B = false, sc alpha = 1.0, sc beta = 0.0 ); /*! * @brief In-place LU decomposition and solution. 
* @param[in,out] rhs Right-hand side overwritten by the result. * @param[in] n_rhs Number of right-hand sides. * @param[in] trans Flag for transpose. */ void lu_decompose_solve( vector_type & rhs, lo n_rhs = 1, bool trans = false ); /*! * @brief In-place Cholesky decomposition and solution. * @param[in,out] rhs Right-hand side overwritten by the result. * @param[in] n_rhs Number of right-hand sides. */ void cholesky_decompose_solve( vector_type & rhs, lo n_rhs = 1 ); /*! * @brief In-place Cholesky decomposition. */ void cholesky_decompose( ); /*! * @brief Cholesky solution * @param[in,out] rhs Right-hand side overwritten by the result. * @param[in] n_rhs Number of right-hand sides. */ void cholesky_solve( vector_type & rhs, lo n_rhs = 1 ); /*! * Resizes the matrix. * @param[in] n_rows Number of rows. * @param[in] n_columns Number of columns. */ void resize( lo n_rows, lo n_columns ) { _data.resize( n_rows * n_columns ); _data.shrink_to_fit( ); _n_rows = n_rows; _n_columns = n_columns; } protected: std::vector< sc, besthea::allocator_type< sc > > _data; //!< Raw data. }; #endif /* INCLUDE_BESTHEA_FULL_MATRIX_H_ */
GxB_Matrix_serialize.c
//------------------------------------------------------------------------------ // GxB_Matrix_serialize: copy a matrix into a serialized array of bytes //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // serialize a GrB_Matrix into a blob of bytes // This method is similar to GrB_Matrix_serialize. In contrast with the GrB* // method, this method allocates the blob itself, and hands over the allocated // space to the user application. The blob must be freed by the same free // function passed in to GxB_init, or by the ANSI C11 free() if GrB_init was // used. On input, the blob_size need not be initialized; it is returned as // the size of the blob as allocated. // This method also includes the descriptor as the last parameter, which allows // for the compression method to be selected, and controls the # of threads // used to create the blob. 
Example usage: /* void *blob = NULL ; GrB_Index blob_size = 0 ; GrB_Matrix A, B = NULL ; // construct a matrix A, then serialized it: GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ; // GxB mallocs the blob GxB_Matrix_deserialize (&B, atype, blob, blob_size, NULL) ; free (blob) ; // user frees the blob */ #include "GB.h" #include "GB_serialize.h" GrB_Info GxB_Matrix_serialize // serialize a GrB_Matrix to a blob ( // output: void **blob_handle, // the blob, allocated on output GrB_Index *blob_size_handle, // size of the blob on output // input: GrB_Matrix A, // matrix to serialize const GrB_Descriptor desc // descriptor to select compression method // and to control # of threads used ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_Matrix_serialize (&blob, &blob_size, A, desc)") ; GB_BURBLE_START ("GxB_Matrix_serialize") ; GB_RETURN_IF_NULL (blob_handle) ; GB_RETURN_IF_NULL (blob_size_handle) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; GB_GET_DESCRIPTOR (info, desc, xx1, xx2, xx3, xx4, xx5, xx6, xx7) ; // get the compression method from the descriptor int method = (desc == NULL) ? GxB_DEFAULT : desc->compression ; //-------------------------------------------------------------------------- // serialize the matrix //-------------------------------------------------------------------------- (*blob_handle) = NULL ; size_t blob_size = 0 ; info = GB_serialize ((GB_void **) blob_handle, &blob_size, A, method, Context) ; (*blob_size_handle) = (GrB_Index) blob_size ; GB_BURBLE_END ; #pragma omp flush return (info) ; }
main.c
#include "main.h"

/*
 * Draws one sample: advances the RNG, performs five consecutive encryptions
 * while saving each intermediate state, and folds the saved states and
 * ciphertexts into a single value via MASK().
 *
 * Declared static: a plain file-scope C99 "inline" provides no external
 * definition, which can cause an undefined-reference link error whenever the
 * compiler chooses not to inline a call.
 */
static inline uint64_t linear_sample(struct RNG_state* seed)
{
    state saved_state[6];
    state state;                 /* shadows the typedef name from here on */
    union Register saved_cipher[5];

    RAND(state, seed);
    COPY(saved_state[0], state);

    ENCR(&saved_cipher[0], state);
    COPY(saved_state[1], state);

    ENCR(&saved_cipher[1], state);
    COPY(saved_state[2], state);

    ENCR(&saved_cipher[2], state);
    COPY(saved_state[3], state);

    ENCR(&saved_cipher[3], state);
    COPY(saved_state[4], state);

    ENCR(&saved_cipher[4], state);
    COPY(saved_state[5], state);

    return MASK(saved_state, saved_cipher);
}

int main(int argc, char const *argv[])
{
    struct Properties *prop;
    struct RNG_state *seed;
    uint32_t small_seed = 0;
    uint64_t num = 1;
    int64_t res = 0;
    int64_t inbalance = 0;
    uint64_t bias = 0;
    uint64_t i = 0;
    uint32_t tid;
    uint8_t progress = 0;
    uint8_t t_progress = 0;
    uint32_t shift = 0;

    srand((uint32_t) time(NULL));
    small_seed = (uint32_t) rand();

    prop = read_args(argc, argv);

    /* per-thread sample count: 2^(2W+5) total, divided by the # of threads */
    shift = (WEIGHT * 2 + 5 - ilog2(prop->num_proc_to_use));
    num <<= shift;

    /* Each thread intentionally runs the FULL loop of `num` iterations (the
     * final total is reported as num * num_proc_to_use below).  The original
     * code expressed this with a nested "#pragma omp parallel for", which only
     * behaves that way while nested parallelism is disabled and duplicates
     * work incorrectly when it is enabled; a plain per-thread loop is the
     * robust equivalent.  `res` must be private: every thread writes it. */
# pragma omp parallel private(i, tid, seed, res) shared(num) reduction(+:bias,inbalance)
    {
        tid = (uint32_t) omp_get_thread_num();
        seed = init_aesrand_r(small_seed ^ tid, tid);

        for (i = 0; i < num; ++i)
        {
            res = linear_sample(seed);
            inbalance += 1 - 2 * res;
            bias += res;

            /* thread 0 reports progress for its own (representative) loop */
            if (tid == 0)
            {
                t_progress = ((i << 7) >> shift);   /* i scaled to 0..128 */
                if (progress < t_progress)
                {
                    progress = t_progress;
                    /* print the NEW value (the original printed the stale
                     * one) and flush, since "\r" emits no newline */
                    printf("\rProgress (over 128): %u%%", progress);
                    fflush(stdout);
                }
            }
        }
    }

    /* total samples actually drawn across all threads */
    num = num * prop->num_proc_to_use;

    print_results(WEIGHT, num, inbalance, bias);
    return 0;
}
regexredux.c
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pcre.h>

// Growable byte buffer: `data` points to `capacity` bytes, of which the first
// `size` are in use.  Buffers grow by doubling and are never shrunk.
typedef struct{
   char * data;
   int capacity, size;
} string;

// Function for searching a src_String for a pattern, replacing it with some
// specified replacement, and storing the result in dst_String.
// NOTE(review): every malloc/realloc in this program is unchecked; an
// allocation failure dereferences NULL instead of failing gracefully.
static void replace(char const * const pattern, char const * const replacement
  , string const * const src_String, string * const dst_String
  , pcre_jit_stack * const stack){
	char const * error;
	int offset, pos=0, match[3];
	int const replacement_Size=strlen(replacement);

	// Compile and study pattern.  JIT-study is required because matching
	// below goes through pcre_jit_exec(), which has no interpreter fallback.
	pcre * regex=pcre_compile(pattern, 0, &error, &offset, NULL);
	pcre_extra * aid=pcre_study(regex, PCRE_STUDY_JIT_COMPILE, &error);

	// Find each match of the pattern in src_String and append the characters
	// preceding each match and the replacement text to dst_String.
	while(pcre_jit_exec(regex, aid, src_String->data, src_String->size
	  , pos, 0, match, 3, stack)>=0){

		// Allocate more memory for dst_String if there is not enough space for
		// the characters preceding the match and the replacement text.
		while(dst_String->size+match[0]-pos+replacement_Size
		  >dst_String->capacity)
			dst_String->data=realloc(dst_String->data, dst_String->capacity*=2);

		// Append the characters preceding the match and the replacement text to
		// dst_String and update the size of dst_String.
		memcpy(dst_String->data+dst_String->size, src_String->data+pos
		  , match[0]-pos);
		memcpy(dst_String->data+dst_String->size+match[0]-pos, replacement
		  , replacement_Size);
		dst_String->size+=match[0]-pos+replacement_Size;

		// Update pos to continue searching after the current match.
		pos=match[1];
	}

	pcre_free_study(aid);
	pcre_free(regex);

	// Allocate more memory for dst_String if there is not enough space for
	// the characters following the last match (or the entire src_String if
	// there was no match).
	while(dst_String->size+src_String->size-pos>dst_String->capacity)
		dst_String->data=realloc(dst_String->data, dst_String->capacity*=2);

	// Append the characters following the last match (or the entire src_String
	// if there was no match) to dst_String and update the size of dst_String.
	memcpy(dst_String->data+dst_String->size, src_String->data+pos
	  , src_String->size-pos);
	dst_String->size+=src_String->size-pos;
}

int main(void){
	char const * const count_Info[]={
		"agggtaaa|tttaccct",
		"[cgt]gggtaaa|tttaccc[acg]",
		"a[act]ggtaaa|tttacc[agt]t",
		"ag[act]gtaaa|tttac[agt]ct",
		"agg[act]taaa|ttta[agt]cct",
		"aggg[acg]aaa|ttt[cgt]ccct",
		"agggt[cgt]aa|tt[acg]accct",
		"agggta[cgt]a|t[acg]taccct",
		"agggtaa[cgt]|[acg]ttaccct"
	}, * const replace_Info[][2]={
		{"tHa[Nt]", "<4>"},
		{"aND|caN|Ha[DS]|WaS", "<3>"},
		{"a[NSt]|BY", "<2>"},
		{"<[^>]*>", "|"},
		{"\\|[^|][^|]*\\|", "-"}
	};

	// `.size` of both buffers is implicitly zero-initialized here.
	string input={malloc(16384), 16384}, sequences={malloc(16384), 16384};
	int postreplace_Size;

	// Read in input from stdin until we reach the end or encounter an error.
	for(int bytes_Read;
	  (bytes_Read=fread(input.data+input.size, 1, input.capacity-input.size
	  , stdin))>0;)
		// Update the size of input to reflect the newly read input and if
		// we've reached the full capacity of the input string then also double
		// its size.
		if((input.size+=bytes_Read)==input.capacity)
			input.data=realloc(input.data, input.capacity*=2);

	#pragma omp parallel
	{
		// Each thread gets its own JIT match stack (required by
		// pcre_jit_exec and not shareable between threads).
		pcre_jit_stack * const stack=pcre_jit_stack_alloc(16384, 16384);

		// Find all sequence descriptions and new lines in input, replace them
		// with empty strings, and store the result in the sequences string.
		// The implicit barrier at the end of this `single` ensures sequences
		// is fully built before any thread uses it below.
		#pragma omp single
		{
			replace(">.*\\n|\\n", "", &input, &sequences, stack);
			// Only input.data is freed; input.size is still read (and valid)
			// for the final report at the bottom of main().
			free(input.data);
		}

		// Have one thread start working on performing all the replacements
		// serially.
		#pragma omp single nowait
		{
			// We'll use two strings when doing all the replacements, searching
			// for patterns in prereplace_String and using postreplace_String to
			// store the string after the replacements have been made. After
			// each iteration these two then get swapped. Start out with both
			// strings having the same capacity as the sequences string and also
			// copy the sequences string into prereplace_String for the initial
			// iteration.
			string prereplace_String={
			  malloc(sequences.capacity), sequences.capacity, sequences.size
			}, postreplace_String={
			  malloc(sequences.capacity), sequences.capacity
			};
			memcpy(prereplace_String.data, sequences.data, sequences.size);

			// Iterate through all the replacement patterns and their
			// replacements in replace_Info[].
			for(int i=0; i<sizeof(replace_Info)/sizeof(char * [2]); i++){
				replace(replace_Info[i][0], replace_Info[i][1]
				  , &prereplace_String, &postreplace_String, stack);

				// Swap prereplace_String and postreplace_String in preparation
				// for the next iteration.
				string const temp=prereplace_String;
				prereplace_String=postreplace_String;
				postreplace_String=temp;
				postreplace_String.size=0;
			}

			// If any replacements were made, they'll be in prereplace_String
			// instead of postreplace_String because of the swap done after each
			// iteration. Copy its size to postreplace_Size.
			postreplace_Size=prereplace_String.size;

			free(prereplace_String.data);
			free(postreplace_String.data);
		}

		// Iterate through all the count patterns in count_Info[] and perform
		// the counting for each one on a different thread if available.
		#pragma omp for schedule(dynamic) ordered
		for(int i=0; i<sizeof(count_Info)/sizeof(char *); i++){
			char const * error;
			int offset, pos=0, match[3], count=0;

			// Compile and study pattern.
			pcre * regex=pcre_compile(count_Info[i], 0, &error, &offset, NULL);
			pcre_extra * aid=pcre_study(regex, PCRE_STUDY_JIT_COMPILE, &error);

			// Find each match of the pattern in the sequences string and
			// increment count for each match.
			while(pcre_jit_exec(regex, aid, sequences.data, sequences.size
			  , pos, 0, match, 3, stack)>=0){
				count++;

				// Update pos to continue searching after the current match.
				pos=match[1];
			}
			pcre_free_study(aid);
			pcre_free(regex);

			// Print the count for each pattern in the correct order.
			#pragma omp ordered
			printf("%s %d\n", count_Info[i], count);
		}

		pcre_jit_stack_free(stack);
	}

	free(sequences.data);

	// Print the size of the original input, the size of the input without the
	// sequence descriptions & new lines, and the size after having made all the
	// replacements.
	printf("\n%d\n%d\n%d\n", input.size, sequences.size, postreplace_Size);

	return 0;
}
/* notes, command-line, and program output
NOTES:
64-bit Ubuntu quad core
gcc (Ubuntu 6.3.0-12ubuntu2) 6.3.0 20170406

Fri, 14 Apr 2017 16:26:34 GMT

MAKE:
/usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp
regexredux.gcc-4.c -o regexredux.gcc-4.gcc_run -lpcre
rm regexredux.gcc-4.c
0.51s to complete and log all make actions

COMMAND LINE:
./regexredux.gcc-4.gcc_run 0 < regexredux-input5000000.txt

PROGRAM OUTPUT:
agggtaaa|tttaccct 356
[cgt]gggtaaa|tttaccc[acg] 1250
a[act]ggtaaa|tttacc[agt]t 4252
ag[act]gtaaa|tttac[agt]ct 2894
agg[act]taaa|ttta[agt]cct 5435
aggg[acg]aaa|ttt[cgt]ccct 1537
agggt[cgt]aa|tt[acg]accct 1431
agggta[cgt]a|t[acg]taccct 1608
agggtaa[cgt]|[acg]ttaccct 2178

50833411
50000000
27388361
*/
eavlDestinationTopologyScatterMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information. #ifndef EAVL_DESTINATION_TOPOLOGY_SCATTER_MAP_OP_H #define EAVL_DESTINATION_TOPOLOGY_SCATTER_MAP_OP_H #include "eavlCUDA.h" #include "eavlCellSet.h" #include "eavlCellSetExplicit.h" #include "eavlCellSetAllStructured.h" #include "eavlDataSet.h" #include "eavlArray.h" #include "eavlOpDispatch.h" #include "eavlOperation.h" #include "eavlTopology.h" #include "eavlException.h" #include <time.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #ifndef DOXYGEN template <class CONN> struct eavlDestinationTopologyScatterMapOp_CPU { static inline eavlArray::Location location() { return eavlArray::HOST; } template <class F, class IN, class OUT, class INDEX> static void call(int nitems, CONN &conn, const IN inputs, OUT outputs, INDEX indices, F &functor) { int *sparseindices = get<0>(indices).array; int ids[MAX_LOCAL_TOPOLOGY_IDS]; #pragma omp parallel for private(ids) for (int denseindex = 0; denseindex < nitems; ++denseindex) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int nids; int shapeType = conn.GetElementComponents(sparseindex, nids, ids); collect(sparseindex, outputs) = functor(shapeType, nids, ids, collect(denseindex, inputs)); } } }; #if defined __CUDACC__ template <class CONN, class F, class IN, class OUT, class INDEX> __global__ void eavlDestinationTopologyScatterMapOp_kernel(int nitems, CONN conn, const IN inputs, OUT outputs, INDEX indices, F functor) { int *sparseindices = get<0>(indices).array; const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; int ids[MAX_LOCAL_TOPOLOGY_IDS]; for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int nids; int shapeType = conn.GetElementComponents(sparseindex, nids, ids); collect(sparseindex, outputs) = functor(shapeType, nids, ids, collect(denseindex, inputs)); } } 
// GPU driver: launches the kernel above with a fixed configuration
// (32 blocks x 256 threads); the kernel's grid-stride loop covers any nitems.
template <class CONN>
struct eavlDestinationTopologyScatterMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, INDEX indices, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads, 1, 1);
        dim3 blocks (32,         1, 1);
        eavlDestinationTopologyScatterMapOp_kernel<<< blocks, threads >>>(nitems, conn, inputs, outputs, indices, functor);
        CUDA_CHECK_ERROR();
    }
};

#endif

#endif

// ****************************************************************************
// Class:  eavlDestinationTopologyScatterMapOp
//
// Purpose:
///   Map from one element in a mesh to the same element, with
///   topological information passed along to the functor.
///   In this scatter version of the operation, the inputs (on the destination)
///   topology are sparsely indexed and the outputs are compacted, i.e.
///   the outputs are densely indexed 0 to n-1.
//
// Programmer:  Jeremy Meredith
// Creation:    August  1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class INDEX, class F>
class eavlDestinationTopologyScatterMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;      //!< cell set supplying the connectivity
    eavlTopology topology;   //!< which topological mapping to traverse
    I            inputs;
    O            outputs;
    INDEX        indices;    //!< dense->sparse index array
    F            functor;
  public:
    eavlDestinationTopologyScatterMapOp(eavlCellSet *c, eavlTopology t,
                                        I i, O o, INDEX ind, F f)
        : cells(c), topology(t), inputs(i), outputs(o), indices(ind), functor(f)
    {
    }
    // Dispatch to the OpenMP implementation, picking the connectivity type
    // from the concrete cell-set class.
    // NOTE(review): if the cell set is neither explicit nor all-structured,
    // this silently does nothing -- presumably those are the only two
    // concrete kinds; verify against callers.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlDestinationTopologyScatterMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlDestinationTopologyScatterMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
    }
    // Dispatch to the CUDA implementation.  Explicit connectivity arrays are
    // pushed to the device before the launch and marked host-resident again
    // afterwards; regular connectivity is passed by value.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);

            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();

            eavlOpDispatch<eavlDestinationTopologyScatterMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);

            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlDestinationTopologyScatterMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};

// helper function for type deduction
template <class I, class O, class INDEX, class F>
eavlDestinationTopologyScatterMapOp<I,O,INDEX,F> *new_eavlDestinationTopologyScatterMapOp(eavlCellSet *c, eavlTopology t,
                                                                      I i, O o, INDEX indices, F f)
{
    return new eavlDestinationTopologyScatterMapOp<I,O,INDEX,F>(c,t,i,o,indices,f);
}

#endif
test.c
#include <stdio.h>

#define M (1024*1024)
#define BUFF_SIZE (1*M)
#define N (8*BUFF_SIZE)

int b[N];

/*
 * Fills b[] with a sentinel/index pattern, increments b[start..size) in an
 * offloaded region, then verifies that exactly that range (and nothing
 * outside it) changed.  Per-element reporting stops after 25 failures.
 * Returns 1 if any element was wrong, 0 on success.
 */
int Test(int start, int size)
{
    int i;
    int errors = 0;

    for (i = 0; i < start; i++) b[i] = -1;
    for (i = start; i < size; i++) b[i] = i;
    for (i = size; i < N; i++) b[i] = -1;

    /* A combined "parallel for" construct must be followed directly by the
     * for-loop; the original wrapped the loop in a compound statement, which
     * is non-conforming. */
#pragma omp target parallel for
    for (int j = start; j < size; j++)
        b[j] += 1;

    for (i = 0; i < start && errors < 25; i++) {
        if (b[i] != -1)
            printf("%4i: before, got %d, expected %d, %d error\n", i, b[i], -1, ++errors);
    }
    for (i = start; i < size && errors < 25; i++) {
        if (b[i] != i + 1)
            printf("%4i: in, got %d, expected %d, %d error\n", i, b[i], i + 1, ++errors);
    }
    for (i = size; i < N && errors < 25; i++) {
        if (b[i] != -1)
            printf("%4i: after, got %d, expected %d, %d error\n", i, b[i], -1, ++errors);
    }

    /* The success and failure messages were swapped in the original:
     * errors > 0 printed "success" and vice versa. */
    if (errors > 0) {
        printf("%d errors with start %d, size %d (%d mod buff size)\n\n",
               errors, start, size, size % BUFF_SIZE);
    } else {
        printf("success with start %d, size %d (%d mod buff size)\n\n",
               start, size, size % BUFF_SIZE);
    }
    return (errors > 0);
}

int main()
{
    int offset[] = {0, 1, 2, BUFF_SIZE/2, BUFF_SIZE-2, BUFF_SIZE-1};
    /* derive the count from the array (onum was previously unused and the
     * loops hardcoded 6) */
    int onum = (int)(sizeof(offset) / sizeof(offset[0]));
    int errors = 0;

    for (int s1 = 0; s1 < onum; s1++) {
        for (int s2 = 0; s2 < onum; s2++) {
            errors += Test(offset[s1], N - offset[s2]);
            if (errors > 20) {
                printf("abort due to errors\n");
                return errors;
            }
        }
    }
    printf("finished with %d errors\n", errors);
    return errors;
}
parallel_for_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}} #pragma omp parallel for // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}} #pragma omp parallel for foo void test_no_clause() { int i; #pragma omp parallel for for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}} #pragma omp parallel for ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel for for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} #pragma omp parallel for foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} #pragma omp parallel for; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel for'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} #pragma omp parallel for linear(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} #pragma omp parallel for private(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} #pragma omp parallel for, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for collapse for (i = 0; i < 16; 
++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-error@+2 {{expected ')'}} // 
expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} #pragma omp parallel for collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+1 {{defined as firstprivate}} #pragma omp parallel for collapse(2) firstprivate(i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget 
to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+3 {{reduction variable must be shared}} // expected-error@+2 {{private variable cannot be reduction}} // expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp parallel for private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel for private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel for private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp 
parallel for lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel for lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel for lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for firstprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for firstprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp parallel for firstprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for firstprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for firstprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel for lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + 
b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
dropout-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file dropout-inl.h
 * \brief CPU/GPU dropout operator. Implements "inverted" dropout: during
 *        training, inputs are zeroed with probability p and the surviving
 *        entries are rescaled by 1/(1-p), so inference needs no rescaling.
 *        An MKL + OpenMP fast path is used when available.
 * \author Bing Xu
 */

#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../../engine/openmp.h"
#include "../operator_common.h"
#include "../mshadow_op.h"

#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>

#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif  // USE_MKL && _OPENMP

namespace dropout {
/* Index constants for the operator's input/output/resource vectors. */
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};            // kMask is a hidden output (see NumVisibleOutputs)
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};        // kAlways applies dropout at inference time too
}  // namespace dropout

namespace mxnet {
namespace op {

#if defined(USE_MKL) && defined(_OPENMP)
/// Fill r[0..n-1] with Bernoulli(p) samples (1 with probability p, else 0)
/// using MKL's VSL stream, parallelized over OpenMP threads.
/// Each thread skips ahead in one shared-seed stream so the sequence is
/// partitioned deterministically for a given seed.
/// NOTE(review): seed comes from rand(), so results differ run-to-run and are
/// not tied to MXNet's global RNG seed — confirm whether that is intended.
static void bernoulli_generate(int n, double p, int* r) {
  const int seed = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
# pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    /* Split [0, n) into nthr contiguous chunks; the last chunk may be short. */
    const int avg_amount = (n + nthr - 1) / nthr;
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
    if (my_amount > 0) {
      VSLStreamStatePtr stream;
      vslNewStream(&stream, VSL_BRNG_MCG31, seed);
      /* Advance this thread's stream to the start of its chunk so chunks
       * are disjoint subsequences of the same stream. */
      vslSkipAheadStream(stream, my_offset);
      viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
      vslDeleteStream(&stream);
    }
  }
}
#endif  // USE_MKL && _OPENMP

/// Parameters of the dropout operator.
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;    // drop probability; keep probability is 1 - p
  int mode;   // dropout::kTraining or dropout::kAlways
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
  }
};  // struct DropoutParam

/// Dropout operator. Forward writes both the dropped/rescaled output and the
/// mask; Backward multiplies the incoming gradient by the saved mask.
template<typename xpu, typename DType>
class DropoutOp : public Operator {
 public:
  explicit DropoutOp(DropoutParam param) {
    /* Store the keep probability rather than the drop probability. */
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = param.mode;
  }

  /// Forward: out = data * mask / pkeep in train mode (or mode == "always"),
  /// identity copy otherwise.
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_data.size(), 1U);
    if (ctx.is_train) {
      CHECK_EQ(out_data.size(), 2U);  // output + mask only exist in training graphs
    }
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    if (ctx.is_train || mode_ == dropout::kAlways) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
#if !defined(__CUDACC__) && defined(USE_MKL) && defined(_OPENMP)
      /* MKL fast path (CPU only): generate 0/1 ints directly into the mask
       * buffer, then scale survivors by 1/pkeep. */
      DType* outptr = out.dptr_;
      DType* dataptr = data.dptr_;
      /* NOTE(review): the DType mask buffer is reinterpreted as int storage;
       * this assumes sizeof(DType) is compatible with sizeof(int) — confirm
       * for half-precision types. */
      auto maskptr = reinterpret_cast<int*>(mask.dptr_);
      int count = mask.shape_[0]*mask.shape_[1];
      /* bernoulli_generate writes 1 with probability pkeep_ (keep), else 0. */
      bernoulli_generate(count, this->pkeep_, maskptr);
      const float pk_1 = 1.0f / pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * pk_1;
      }
#else
      /* Generic path: build the mask from uniform samples thresholded at
       * pkeep, pre-scaled by 1/pkeep (inverted dropout). */
      Random<xpu> *prnd = ctx.requested[dropout::kRandom].get_random<xpu, real_t>(s);
      mask = tcast<DType>(F<mshadow_op::threshold>(
             prnd->uniform(mask.shape_), pkeep_) * (1.0f / pkeep_));
      Assign(out, req[dropout::kOut], data * mask);
#endif  // USE_MKL && _OPENMP
    } else {
      /* Inference in "training" mode: dropout is a no-op. */
      Assign(out, req[dropout::kOut], F<mshadow_op::identity>(data));
    }
  }

  /// Backward: gdata = grad * mask (mask already carries the 1/pkeep scale on
  /// the generic path; the MKL path re-applies it explicitly).
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(out_grad.size(), 1U);
    CHECK_EQ(in_grad.size(), 1U);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
    if (ctx.is_train || mode_ == dropout::kAlways) {
#if !defined(__CUDACC__) && defined(USE_MKL) && defined(_OPENMP)
      DType* ingradptr = gdata.dptr_;
      DType* outgradptr = grad.dptr_;
      /* Mask was stored as raw 0/1 ints by the MKL forward path. */
      auto maskptr = reinterpret_cast<int*>(mask.dptr_);
      int count = mask.shape_[0]*mask.shape_[1];
      const float pk_1 = 1.0f / pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
      }
#else  // USE_MKL && _OPENMP
      CHECK_EQ(grad.shape_.Size(), mask.shape_.Size());
      Assign(gdata, req[dropout::kData], grad * mask);
#endif  // USE_MKL && _OPENMP
    } else {
      /* No dropout was applied in forward; pass the gradient through. */
      Assign(gdata, req[dropout::kData], F<mshadow_op::identity>(grad));
    }
  }

 private:
  real_t pkeep_;  // keep probability = 1 - p
  int mode_;      // dropout::kTraining or dropout::kAlways
};  // class DropoutOp

/// Factory, defined per-device in the .cc/.cu files.
template<typename xpu>
Operator *CreateOp(DropoutParam param, int dtype);

#if DMLC_USE_CXX11
/// Operator property: shape/type inference, in-place options and resource
/// requests for Dropout.
class DropoutProp : public OperatorProperty {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
    param_.Init(kwargs);
  }

  std::map<std::string, std::string> GetParams() const override {
    return param_.__DICT__();
  }

  /// Both outputs (output, mask) have the same shape as the single input.
  bool InferShape(std::vector<TShape> *in_shape,
                  std::vector<TShape> *out_shape,
                  std::vector<TShape> *aux_shape) const override {
    using namespace mshadow;
    CHECK_EQ(in_shape->size(), 1U);
    const TShape &dshape = in_shape->at(0);
    if (dshape.ndim() == 0) return false;
    out_shape->clear();
    out_shape->push_back(dshape);
    out_shape->push_back(dshape);
    return true;
  }

  /// Both outputs share the input's dtype; the dtype must be specified.
  bool InferType(std::vector<int> *in_type,
                 std::vector<int> *out_type,
                 std::vector<int> *aux_type) const override {
    CHECK_EQ(in_type->size(), 1U);
    int dtype = in_type->at(0);

    if (dtype == -1) {
      LOG(FATAL) << "input type to dropout is not specified.";
      return false;
    }

    size_t nout = this->ListOutputs().size();
    out_type->clear();
    for (size_t i = 0; i < nout; ++i) out_type->push_back(dtype);
    return true;
  }

  OperatorProperty* Copy() const override {
    auto ptr = new DropoutProp();
    ptr->param_ = param_;
    return ptr;
  }

  std::string TypeString() const override {
    return "Dropout";
  }

  /// Backward needs the output gradient and the saved mask (not the input).
  std::vector<int> DeclareBackwardDependency(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data) const override {
    return {out_grad[dropout::kOut], out_data[dropout::kMask]};
  }

  std::vector<std::pair<int, void*> > BackwardInplaceOption(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data,
    const std::vector<void*> &in_grad) const override {
    return {{out_grad[dropout::kOut], in_grad[dropout::kData]}};
  }

  std::vector<std::pair<int, void*> > ForwardInplaceOption(
    const std::vector<int> &in_data,
    const std::vector<void*> &out_data) const override {
    return {{in_data[dropout::kData], out_data[dropout::kOut]}};
  }

  std::vector<ResourceRequest> ForwardResource(
      const std::vector<TShape> &in_shape) const override {
    return {ResourceRequest::kRandom};
  }

  /* The mask is an internal output: two outputs exist but only one is
   * exposed to the user-facing graph. */
  int NumVisibleOutputs() const override {
    return 1;
  }

  int NumOutputs() const override {
    return 2;
  }

  std::vector<std::string> ListOutputs() const override {
    return {"output", "mask"};
  }

  Operator* CreateOperator(Context ctx) const override {
    LOG(FATAL) << "Not Implemented";
    return NULL;
  }

  Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
                             std::vector<int> *in_type) const override;

 private:
  DropoutParam param_;
};  // class DropoutProp
#endif  // DMLC_USE_CXX11
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
symmetry.h
// Copyright (c) 2013-2015 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
//    following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
//    and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/** \file symmetry.h
 *
 *  \brief Contains definition and partial implementation of sirius::Symmetry class.
 */

#ifndef __SYMMETRY_H__
#define __SYMMETRY_H__

extern "C" {
#include <spglib.h>
}

#include "geometry3d.hpp"
#include "constants.h"
#include "utils.h"
#include "gvec.hpp"

namespace sirius {

/// Descriptor of a single space-group symmetry operation {R|t}.
struct space_group_symmetry_descriptor
{
    /// Rotational part of symmetry operation (fractional coordinates).
    matrix3d<int> R;

    /// Fractional translation.
    vector3d<double> t;

    /// Proper (+1) or improper (-1) rotation.
    int proper;

    /// Proper rotation matrix in Cartesian coordinates.
    matrix3d<double> rotation;

    /// Euler angles (alpha, beta, gamma) of the proper rotation part.
    vector3d<double> euler_angles;
};

/// Descriptor of a magnetic-group operation: a spatial operation paired with
/// a spin rotation that maps the magnetic structure onto itself.
struct magnetic_group_symmetry_descriptor
{
    /// Spatial part of the magnetic symmetry operation.
    space_group_symmetry_descriptor spg_op;

    /// Index of the generating space-group operation.
    int isym;

    /// Proper rotation matrix in Cartesian coordinates.
    matrix3d<double> spin_rotation;
};

/// Crystal symmetry: wraps spglib's space-group analysis and provides
/// symmetrization of scalar and vector functions in plane-wave and
/// muffin-tin (real spherical harmonic) representations.
class Symmetry
{
  private:

    /// Matrix of lattice vectors.
    /** Spglib requires this matrix to have a positively defined determinant. */
    matrix3d<double> lattice_vectors_;

    /// Inverse of the lattice-vector matrix (Cartesian -> fractional).
    matrix3d<double> inverse_lattice_vectors_;

    /// Number of atoms in the unit cell.
    int num_atoms_;

    /// Atom positions in fractional coordinates, (3, num_atoms_).
    mdarray<double, 2> positions_;

    /// Atom type index for each atom (spglib input).
    std::vector<int> types_;

    /// Position tolerance passed to spglib.
    double tolerance_;

    /// Crystal structure descriptor returned by spglib.
    SpglibDataset* spg_dataset_;

    /// Symmetry table for atoms.
    /** For each atom ia and symmetry isym sym_table_(ia, isym) stores index of atom ja to which original atom
     *  transforms under symmetry operation. */
    mdarray<int, 2> sym_table_;

    /// List of pure space-group operations.
    std::vector<space_group_symmetry_descriptor> space_group_symmetry_;

    /// List of operations compatible with the magnetic structure.
    std::vector<magnetic_group_symmetry_descriptor> magnetic_group_symmetry_;

    /// Compute Euler angles corresponding to the proper rotation part of the given symmetry.
    vector3d<double> euler_angles(matrix3d<double> const& rot__) const;

    /// Generate rotation matrix from three Euler angles
    /** Euler angles \f$ \alpha, \beta, \gamma \f$ define the general rotation as three consecutive rotations:
     *  - about \f$ \hat e_z \f$ through the angle \f$ \gamma \f$ (\f$ 0 \le \gamma < 2\pi \f$)
     *  - about \f$ \hat e_y \f$ through the angle \f$ \beta \f$ (\f$ 0 \le \beta \le \pi \f$)
     *  - about \f$ \hat e_z \f$ through the angle \f$ \alpha \f$ (\f$ 0 \le \gamma < 2\pi \f$)
     *
     *  The total rotation matrix is defined as a product of three rotation matrices:
     *  \f[
     *  R(\alpha, \beta, \gamma) =
     *      \left( \begin{array}{ccc} \cos(\alpha) & -\sin(\alpha) & 0 \\
     *                                \sin(\alpha) & \cos(\alpha) & 0 \\
     *                                0 & 0 & 1 \end{array} \right)
     *      \left( \begin{array}{ccc} \cos(\beta) & 0 & \sin(\beta) \\
     *                                0 & 1 & 0 \\
     *                                -\sin(\beta) & 0 & \cos(\beta) \end{array} \right)
     *      \left( \begin{array}{ccc} \cos(\gamma) & -\sin(\gamma) & 0 \\
     *                                \sin(\gamma) & \cos(\gamma) & 0 \\
     *                                0 & 0 & 1 \end{array} \right) =
     *      \left( \begin{array}{ccc} \cos(\alpha) \cos(\beta) \cos(\gamma) - \sin(\alpha) \sin(\gamma) &
     *                                -\sin(\alpha) \cos(\gamma) - \cos(\alpha) \cos(\beta) \sin(\gamma) &
     *                                \cos(\alpha) \sin(\beta) \\
     *                                \sin(\alpha) \cos(\beta) \cos(\gamma) + \cos(\alpha) \sin(\gamma) &
     *                                \cos(\alpha) \cos(\gamma) - \sin(\alpha) \cos(\beta) \sin(\gamma) &
     *                                \sin(\alpha) \sin(\beta) \\
     *                                -\sin(\beta) \cos(\gamma) &
     *                                \sin(\beta) \sin(\gamma) &
     *                                \cos(\beta) \end{array} \right)
     *  \f]
     */
    matrix3d<double> rot_mtrx_cart(vector3d<double> euler_angles__) const;

  public:

    /// Construct the symmetry object from lattice vectors, atomic positions,
    /// spins (Cartesian) and atom types. Runs spglib and builds the symmetry
    /// tables; terminates on any inconsistency.
    Symmetry(matrix3d<double>& lattice_vectors__,
             int num_atoms__,
             mdarray<double, 2>& positions__,
             mdarray<double, 2>& spins__,
             std::vector<int>& types__,
             double tolerance__);

    ~Symmetry()
    {
        spg_free_dataset(spg_dataset_);
    }

    /// Symmetry (equivalence) class of an atom, as assigned by spglib.
    inline int atom_symmetry_class(int ia__)
    {
        return spg_dataset_->equivalent_atoms[ia__];
    }

    inline int spacegroup_number()
    {
        return spg_dataset_->spacegroup_number;
    }

    inline std::string international_symbol()
    {
        return spg_dataset_->international_symbol;
    }

    inline std::string hall_symbol()
    {
        return spg_dataset_->hall_symbol;
    }

    matrix3d<double> transformation_matrix() const
    {
        return matrix3d<double>(spg_dataset_->transformation_matrix);
    }

    vector3d<double> origin_shift() const
    {
        return vector3d<double>(spg_dataset_->origin_shift[0],
                                spg_dataset_->origin_shift[1],
                                spg_dataset_->origin_shift[2]);
    }

    /// Number of pure space-group symmetry operations.
    inline int num_spg_sym() const
    {
        return static_cast<int>(space_group_symmetry_.size());
    }

    inline space_group_symmetry_descriptor const& space_group_symmetry(int isym__) const
    {
        assert(isym__ >= 0 && isym__ < num_spg_sym());
        return space_group_symmetry_[isym__];
    }

    /// Number of magnetic-group symmetry operations.
    inline int num_mag_sym() const
    {
        return static_cast<int>(magnetic_group_symmetry_.size());
    }

    inline magnetic_group_symmetry_descriptor const& magnetic_group_symmetry(int isym__) const
    {
        assert(isym__ >= 0 && isym__ < num_mag_sym());
        return magnetic_group_symmetry_[isym__];
    }

    /// Index of the atom onto which atom ia__ is mapped by operation isym__.
    inline int sym_table(int ia__, int isym__) const
    {
        return sym_table_(ia__, isym__);
    }

    /// Verify that every rotated G-vector is a member of the G-vector set.
    void check_gvec_symmetry(Gvec const& gvec__, Communicator const& comm__) const;

    /// Symmetrize scalar function.
    /** The following operation is performed:
     *  \f[
     *    f({\bf x}) = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} f({\bf \hat P x})
     *  \f]
     *  For the function expanded in plane-waves we have:
     *  \f[
     *    f({\bf x}) = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G \hat P x}} f({\bf G})
     *     = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G (Rx + t)}} f({\bf G})
     *     = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G t}} e^{i{\bf G Rx}} f({\bf G})
     *  \f]
     *  Now we do a mapping \f$ {\bf GR} \rightarrow \tilde {\bf G} \f$ and find expansion coefficients of the
     *  symmetry transformed function:
     *  \f[
     *    f(\tilde{\bf G}) = e^{i{\bf G t}} f({\bf G})
     *  \f]
     */
    void symmetrize_function(double_complex* f_pw__, Gvec const& gvec__, Communicator const& comm__) const;

    /// Symmetrize the z-component of a vector function in plane-wave representation.
    void symmetrize_vector_function(double_complex* fz_pw__, Gvec const& gvec__, Communicator const& comm__) const;

    /// Symmetrize all three Cartesian components of a vector function in plane-wave representation.
    void symmetrize_vector_function(double_complex* fx_pw__,
                                    double_complex* fy_pw__,
                                    double_complex* fz_pw__,
                                    Gvec const& gvec__,
                                    Communicator const& comm__) const;

    /// Symmetrize a muffin-tin scalar function given on the (lm, r, atom) grid.
    void symmetrize_function(mdarray<double, 3>& frlm__, Communicator const& comm__) const;

    /// Symmetrize the z-component of a muffin-tin vector function.
    void symmetrize_vector_function(mdarray<double, 3>& fz_rlm__, Communicator const& comm__) const;

    /// Symmetrize all three components of a muffin-tin vector function.
    void symmetrize_vector_function(mdarray<double, 3>& fx_rlm__,
                                    mdarray<double, 3>& fy_rlm__,
                                    mdarray<double, 3>& fz_rlm__,
                                    Communicator const& comm__) const;

    /// Irreducible k-point mesh via spglib; fills kp__ (fractional coords) and
    /// weights wk__, returns the number of irreducible points.
    int get_irreducible_reciprocal_mesh(vector3d<int> k_mesh__,
                                        vector3d<int> is_shift__,
                                        mdarray<double, 2>& kp__,
                                        std::vector<double>& wk__) const;

    matrix3d<double> const& lattice_vectors() const
    {
        return lattice_vectors_;
    }

    matrix3d<double> const& inverse_lattice_vectors() const
    {
        return inverse_lattice_vectors_;
    }
};

inline Symmetry::Symmetry(matrix3d<double>& lattice_vectors__,
                          int num_atoms__,
                          mdarray<double, 2>& positions__,
                          mdarray<double, 2>& spins__,
                          std::vector<int>& types__,
                          double tolerance__)
    : lattice_vectors_(lattice_vectors__)
    , num_atoms_(num_atoms__)
    , types_(types__)
    , tolerance_(tolerance__)
{
    PROFILE("sirius::Symmetry::Symmetry");

    /* spglib rejects left-handed lattice-vector matrices */
    if (lattice_vectors__.det() < 0) {
        std::stringstream s;
        s << "spglib requires positive determinant for a matrix of lattice vectors";
        TERMINATE(s);
    }

    double lattice[3][3];
    for (int i: {0, 1, 2}) {
        for (int j: {0, 1, 2}) {
            lattice[i][j] = lattice_vectors_(i, j);
        }
    }

    /* keep a private copy of the positions; spglib may refine them in place */
    positions_ = mdarray<double, 2>(3, num_atoms_);
    for (int ia = 0; ia < num_atoms_; ia++) {
        for (int x: {0, 1, 2}) {
            positions_(x, ia) = positions__(x, ia);
        }
    }

    spg_dataset_ = spg_get_dataset(lattice, (double(*)[3])&positions_(0, 0), &types_[0], num_atoms_, tolerance_);
    if (spg_dataset_ == NULL) {
        TERMINATE("spg_get_dataset() returned NULL");
    }

    if (spg_dataset_->spacegroup_number == 0) {
        TERMINATE("spg_get_dataset() returned 0 for the space group");
    }

    if (spg_dataset_->n_atoms != num_atoms__) {
        std::stringstream s;
        s << "spg_get_dataset() returned wrong number of atoms (" << spg_dataset_->n_atoms << ")" << std::endl
          << "expected number of atoms is " << num_atoms__;
        TERMINATE(s);
    }

    inverse_lattice_vectors_ = inverse(lattice_vectors_);

    /* build the list of space-group operations from the spglib dataset */
    for (int isym = 0; isym < spg_dataset_->n_operations; isym++) {
        space_group_symmetry_descriptor sym_op;

        sym_op.R = matrix3d<int>(spg_dataset_->rotations[isym]);
        sym_op.t = vector3d<double>(spg_dataset_->translations[isym][0],
                                    spg_dataset_->translations[isym][1],
                                    spg_dataset_->translations[isym][2]);
        int p = sym_op.R.det();
        if (!(p == 1 || p == -1)) TERMINATE("wrong rotation matrix");
        sym_op.proper = p;
        /* take the proper part (R * det(R)) and convert to Cartesian frame:
         * L * R * L^{-1} */
        sym_op.rotation = lattice_vectors_ * matrix3d<double>(sym_op.R * p) * inverse_lattice_vectors_;
        sym_op.euler_angles = euler_angles(sym_op.rotation);

        space_group_symmetry_.push_back(sym_op);
    }

    sym_table_ = mdarray<int, 2>(num_atoms_, num_spg_sym());
    /* loop over spatial symmetries */
    for (int isym = 0; isym < num_spg_sym(); isym++) {
        for (int ia = 0; ia < num_atoms_; ia++) {
            auto R = space_group_symmetry(isym).R;
            auto t = space_group_symmetry(isym).t;
            /* spatial transform */
            vector3d<double> pos(positions__(0, ia), positions__(1, ia), positions__(2, ia));
            /* wrap the transformed position back into the unit cell */
            auto v = reduce_coordinates(R * pos + t);

            int ja = -1;
            /* check for equivalent atom */
            for (int k = 0; k < num_atoms_; k++) {
                vector3d<double> pos1(positions__(0, k), positions__(1, k), positions__(2, k));
                if ((v.first - pos1).length() < tolerance_) {
                    ja = k;
                    break;
                }
            }

            if (ja == -1) {
                TERMINATE("equivalent atom was not found");
            }
            sym_table_(ia, isym) = ja;
        }
    }

    /* loop over spatial symmetries */
    for (int isym = 0; isym < num_spg_sym(); isym++) {
        /* loop over spin symmetries */
        for (int jsym = 0; jsym < num_spg_sym(); jsym++) {
            /* take proper part of rotation matrix */
            auto Rspin = space_group_symmetry(jsym).rotation;

            int n{0};
            /* check if all atoms transform under spatial and spin symmetries */
            for (int ia = 0; ia < num_atoms_; ia++) {
                int ja = sym_table_(ia, isym);

                /* now check that the vector field transforms from atom ia to atom ja */
                /* vector field of atom is expected to be in Cartesian coordinates */
                auto vd = Rspin * vector3d<double>(spins__(0, ia), spins__(1, ia), spins__(2, ia)) -
                          vector3d<double>(spins__(0, ja), spins__(1, ja), spins__(2, ja));

                if (vd.length() < 1e-10) {
                    n++;
                }
            }
            /* if all atoms transform under the spin rotation, add it to the list;
             * only the first matching spin rotation per spatial operation is kept */
            if (n == num_atoms_) {
                magnetic_group_symmetry_descriptor mag_op;
                mag_op.spg_op = space_group_symmetry(isym);
                mag_op.isym = isym;
                mag_op.spin_rotation = Rspin;
                magnetic_group_symmetry_.push_back(mag_op);
                break;
            }
        }
    }
}

inline matrix3d<double> Symmetry::rot_mtrx_cart(vector3d<double> euler_angles) const
{
    double alpha = euler_angles[0];
    double beta = euler_angles[1];
    double gamma = euler_angles[2];

    /* z-y-z Euler rotation, see the doxygen comment on the declaration */
    matrix3d<double> rm;
    rm(0, 0) = std::cos(alpha) * std::cos(beta) * std::cos(gamma) - std::sin(alpha) * std::sin(gamma);
    rm(0, 1) = -std::cos(gamma) * std::sin(alpha) - std::cos(alpha) * std::cos(beta) * std::sin(gamma);
    rm(0, 2) = std::cos(alpha) * std::sin(beta);
    rm(1, 0) = std::cos(beta) * std::cos(gamma) * std::sin(alpha) + std::cos(alpha) * std::sin(gamma);
    rm(1, 1) = std::cos(alpha) * std::cos(gamma) - std::cos(beta) * std::sin(alpha) * std::sin(gamma);
    rm(1, 2) = std::sin(alpha) * std::sin(beta);
    rm(2, 0) = -std::cos(gamma) * std::sin(beta);
    rm(2, 1) = std::sin(beta) * std::sin(gamma);
    rm(2, 2) = std::cos(beta);

    return rm;
}

inline vector3d<double> Symmetry::euler_angles(matrix3d<double> const& rot__) const
{
    vector3d<double> angles(0, 0, 0);

    /* only proper rotations (det = +1) have an Euler-angle decomposition here */
    if (std::abs(rot__.det() - 1) > 1e-10) {
        std::stringstream s;
        s << "determinant of rotation matrix is " << rot__.det();
        TERMINATE(s);
    }

    if (std::abs(rot__(2, 2) - 1.0) < 1e-10) { /* cos(beta) == 1, beta = 0 */
        angles[0] = Utils::phi_by_sin_cos(rot__(1, 0), rot__(0, 0));
    } else if (std::abs(rot__(2, 2) + 1.0) < 1e-10) { /* cos(beta) == -1, beta = Pi */
        angles[0] = Utils::phi_by_sin_cos(-rot__(0, 1), rot__(1, 1));
        angles[1] = pi;
    } else {
        double beta = std::acos(rot__(2, 2));
        angles[0] = Utils::phi_by_sin_cos(rot__(1, 2) / std::sin(beta), rot__(0, 2) / std::sin(beta));
        angles[1] = beta;
        angles[2] = Utils::phi_by_sin_cos(rot__(2, 1) / std::sin(beta), -rot__(2, 0) / std::sin(beta));
    }

    /* self-check: rebuilding the rotation from the angles must reproduce rot__ */
    auto rm1 = rot_mtrx_cart(angles);

    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            if (std::abs(rot__(i, j) - rm1(i, j)) > 1e-10) {
                std::stringstream s;
                s << "matrices don't match" << std::endl
                  << "initial symmetry matrix: " << std::endl
                  << rot__(0, 0) << " " << rot__(0, 1) << " " << rot__(0, 2) << std::endl
                  << rot__(1, 0) << " " << rot__(1, 1) << " " << rot__(1, 2) << std::endl
                  << rot__(2, 0) << " " << rot__(2, 1) << " " << rot__(2, 2) << std::endl
                  << "euler angles : " << angles[0] / pi << " " << angles[1] / pi << " " << angles[2] / pi << std::endl
                  << "computed symmetry matrix : " << std::endl
                  << rm1(0, 0) << " " << rm1(0, 1) << " " << rm1(0, 2) << std::endl
                  << rm1(1, 0) << " " << rm1(1, 1) << " " << rm1(1, 2) << std::endl
                  << rm1(2, 0) << " " << rm1(2, 1) << " " << rm1(2, 2) << std::endl;
                TERMINATE(s);
            }
        }
    }

    return angles;
}

inline int Symmetry::get_irreducible_reciprocal_mesh(vector3d<int> k_mesh__,
                                                     vector3d<int> is_shift__,
                                                     mdarray<double, 2>& kp__,
                                                     std::vector<double>& wk__) const
{
    int nktot = k_mesh__[0] * k_mesh__[1] * k_mesh__[2];

    mdarray<int, 2> grid_address(3, nktot);
    std::vector<int> ikmap(nktot);

    double lattice[3][3];
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) lattice[i][j] = lattice_vectors_(i, j);
    }

    /* ikmap[ik] is the index of the irreducible point representing mesh point ik */
    int nknr = spg_get_ir_reciprocal_mesh((int(*)[3])&grid_address(0, 0),
                                          &ikmap[0],
                                          &k_mesh__[0],
                                          &is_shift__[0],
                                          1,
                                          lattice,
                                          (double(*)[3])&positions_(0, 0),
                                          &types_[0],
                                          num_atoms_,
                                          tolerance_);

    /* count multiplicity of each irreducible point */
    std::map<int, int> wknr;
    for (int ik = 0; ik < nktot; ik++) {
        if (wknr.count(ikmap[ik]) == 0) wknr[ikmap[ik]] = 0;
        wknr[ikmap[ik]] += 1;
    }

    wk__ = std::vector<double>(nknr);
    kp__ = mdarray<double, 2>(3, nknr);

    int n = 0;
    for (auto it = wknr.begin(); it != wknr.end(); it++) {
        /* weight = multiplicity / total number of mesh points */
        wk__[n] = double(it->second) / nktot;
        for (int x = 0; x < 3; x++) {
            kp__(x, n) = double(grid_address(x, it->first) + is_shift__[x] / 2.0) / k_mesh__[x];
        }
        n++;
    }

    return nknr;
}

inline void Symmetry::check_gvec_symmetry(Gvec const& gvec__, Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::check_gvec_symmetry");

    int gvec_count = gvec__.gvec_count(comm__.rank());
    int gvec_offset = gvec__.gvec_offset(comm__.rank());

    #pragma omp parallel for
    for (int isym = 0; isym < num_mag_sym(); isym++) {
        auto sm = magnetic_group_symmetry(isym).spg_op.R;

        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;
            auto gv = gvec__.gvec(ig);
            /* apply symmetry operation to the G-vector */
            auto gv_rot = transpose(sm) * gv;
            /* NOTE: a check of gv_rot against the FFT-box limits used to live
             * here; it was disabled upstream. */
            int ig_rot = gvec__.index_by_gvec(gv_rot);
            /* special case where -G is equal to G */
            if (gvec__.reduced() && ig_rot < 0) {
                gv_rot = gv_rot * (-1);
                ig_rot = gvec__.index_by_gvec(gv_rot);
            }
            if (ig_rot < 0 || ig_rot >= gvec__.num_gvec()) {
                std::stringstream s;
                s << "rotated G-vector index is wrong" << std::endl
                  << "original G-vector: " << gv << std::endl
                  << "rotation matrix: " << std::endl
                  << sm(0, 0) << " " << sm(0, 1) << " " << sm(0, 2) << std::endl
                  << sm(1, 0) << " " << sm(1, 1) << " " << sm(1, 2) << std::endl
                  << sm(2, 0) << " " << sm(2, 1) << " " << sm(2, 2) << std::endl
                  << "rotated G-vector: " << gv_rot << std::endl
                  << "rotated G-vector index: " << ig_rot << std::endl
                  << "number of G-vectors: " << gvec__.num_gvec();
                TERMINATE(s);
            }
        }
    }
}

inline void Symmetry::symmetrize_function(double_complex* f_pw__,
                                          Gvec const& gvec__,
                                          Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_function_pw");

    int gvec_count = gvec__.gvec_count(comm__.rank());
    int gvec_offset = gvec__.gvec_offset(comm__.rank());

    /* global accumulator; each rank adds contributions of its local G-vectors
     * and the result is summed over ranks below */
    mdarray<double_complex, 1> sym_f_pw(gvec__.num_gvec());
    sym_f_pw.zero();

    /* view the complex array as interleaved doubles so OpenMP atomics apply */
    double* ptr = (double*)&sym_f_pw(0);

    #pragma omp parallel for
    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        auto R = magnetic_group_symmetry(i).spg_op.R;
        auto t = magnetic_group_symmetry(i).spg_op.t;

        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;

            /* phase factor e^{iGt} */
            double_complex z = f_pw__[ig] * std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t)));

            /* apply symmetry operation to the G-vector;
             * remember that we move R from acting on x to acting on G: G(Rx) = (GR)x;
             * GR is a vector-matrix multiplication which can also be written
             * as matrix^{T}-vector operation */
            auto gv_rot = transpose(R) * gvec__.gvec(ig);

            /* index of a rotated G-vector */
            int ig_rot = gvec__.index_by_gvec(gv_rot);

            if (gvec__.reduced() && ig_rot == -1) {
                /* rotated G is in the -G half: store the conjugate at the
                 * index of -G (inner ig_rot intentionally shadows the outer) */
                gv_rot = gv_rot * (-1);
                int ig_rot = gvec__.index_by_gvec(gv_rot);
                #pragma omp atomic update
                ptr[2 * ig_rot] += z.real();
                #pragma omp atomic update
                ptr[2 * ig_rot + 1] -= z.imag();
            } else {
                assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());
                #pragma omp atomic update
                ptr[2 * ig_rot] += z.real();
                #pragma omp atomic update
                ptr[2 * ig_rot + 1] += z.imag();
            }
        }
    }
    comm__.allreduce(&sym_f_pw(0), gvec__.num_gvec());

    /* average over the number of symmetry operations */
    double nrm = 1 / double(num_mag_sym());
    #pragma omp parallel for
    for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
        f_pw__[ig] = sym_f_pw(ig) * nrm;
    }
}

inline void Symmetry::symmetrize_vector_function(double_complex* fz_pw__,
                                                 Gvec const& gvec__,
                                                 Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_vector_function_pw");

    int gvec_count = gvec__.gvec_count(comm__.rank());
    int gvec_offset = gvec__.gvec_offset(comm__.rank());

    mdarray<double_complex, 1> sym_f_pw(gvec__.num_gvec());
    sym_f_pw.zero();

    double* ptr = (double*)&sym_f_pw(0);

    #pragma omp parallel for
    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        auto R = magnetic_group_symmetry(i).spg_op.R;
        auto t = magnetic_group_symmetry(i).spg_op.t;
        auto S = magnetic_group_symmetry(i).spin_rotation;

        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;

            auto gv_rot = transpose(R) * gvec__.gvec(ig);

            /* index of a rotated G-vector */
            int ig_rot = gvec__.index_by_gvec(gv_rot);

            /* only the S(2,2) element acts on the z-component (collinear case) */
            double_complex z = fz_pw__[ig] * std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t))) * S(2, 2);

            if (gvec__.reduced() && ig_rot == -1) {
                /* store conjugate at -G (inner ig_rot intentionally shadows) */
                gv_rot = gv_rot * (-1);
                int ig_rot = gvec__.index_by_gvec(gv_rot);
                #pragma omp atomic update
                ptr[2 * ig_rot] += z.real();
                #pragma omp atomic update
                ptr[2 * ig_rot + 1] -= z.imag();
            } else {
                assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());
                #pragma omp atomic update
                ptr[2 * ig_rot] += z.real();
                #pragma omp atomic update
                ptr[2 * ig_rot + 1] += z.imag();
            }
        }
    }
    comm__.allreduce(&sym_f_pw(0), gvec__.num_gvec());

    for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
        fz_pw__[ig] = sym_f_pw(ig) / double(num_mag_sym());
    }
}

inline void Symmetry::symmetrize_vector_function(double_complex* fx_pw__,
                                                 double_complex* fy_pw__,
                                                 double_complex* fz_pw__,
                                                 Gvec const& gvec__,
                                                 Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_vector_function_pw");

    int gvec_count = gvec__.gvec_count(comm__.rank());
    int gvec_offset = gvec__.gvec_offset(comm__.rank());

    mdarray<double_complex, 1> sym_fx_pw(gvec__.num_gvec());
    mdarray<double_complex, 1> sym_fy_pw(gvec__.num_gvec());
    mdarray<double_complex, 1> sym_fz_pw(gvec__.num_gvec());
    sym_fx_pw.zero();
    sym_fy_pw.zero();
    sym_fz_pw.zero();

    double* ptr_x = (double*)&sym_fx_pw(0);
    double* ptr_y = (double*)&sym_fy_pw(0);
    double* ptr_z = (double*)&sym_fz_pw(0);

    std::vector<double_complex*> v_pw_in({fx_pw__, fy_pw__, fz_pw__});

    #pragma omp parallel for
    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        auto R = magnetic_group_symmetry(i).spg_op.R;
        auto t = magnetic_group_symmetry(i).spg_op.t;
        auto S = magnetic_group_symmetry(i).spin_rotation;

        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;

            auto gv_rot = transpose(R) * gvec__.gvec(ig);

            /* index of a rotated G-vector */
            int ig_rot = gvec__.index_by_gvec(gv_rot);

            /* NOTE(review): unlike the scalar overload, there is no -G fallback
             * here for reduced G-vector sets — confirm this overload is only
             * called with non-reduced sets. */
            assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());

            double_complex phase = std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t)));
            /* rotate the vector components with the spin-rotation matrix S */
            double_complex vz[] = {double_complex(0, 0), double_complex(0, 0), double_complex(0, 0)};
            for (int j: {0, 1, 2}) {
                for (int k: {0, 1, 2}) {
                    vz[j] += phase * S(j, k) * v_pw_in[k][ig];
                }
            }

            #pragma omp atomic update
            ptr_x[2 * ig_rot] += vz[0].real();
            #pragma omp atomic update
            ptr_y[2 * ig_rot] += vz[1].real();
            #pragma omp atomic update
            ptr_z[2 * ig_rot] += vz[2].real();
            #pragma omp atomic update
            ptr_x[2 * ig_rot + 1] += vz[0].imag();
            #pragma omp atomic update
            ptr_y[2 * ig_rot + 1] += vz[1].imag();
            #pragma omp atomic update
            ptr_z[2 * ig_rot + 1] += vz[2].imag();
        }
    }
    comm__.allreduce(&sym_fx_pw(0), gvec__.num_gvec());
    comm__.allreduce(&sym_fy_pw(0), gvec__.num_gvec());
    comm__.allreduce(&sym_fz_pw(0), gvec__.num_gvec());

    for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
        fx_pw__[ig] = sym_fx_pw(ig) / double(num_mag_sym());
        fy_pw__[ig] = sym_fy_pw(ig) / double(num_mag_sym());
        fz_pw__[ig] = sym_fz_pw(ig) / double(num_mag_sym());
    }
}

inline void Symmetry::symmetrize_function(mdarray<double, 3>& frlm__,
                                          Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_function_mt");

    int lmmax = (int)frlm__.size(0);
    int nrmax = (int)frlm__.size(1);
    if (num_atoms_ != (int)frlm__.size(2)) TERMINATE("wrong number of atoms");

    /* distribute atoms between MPI ranks */
    splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());

    int lmax = Utils::lmax_by_lmmax(lmmax);

    /* Wigner-like rotation matrix of real spherical harmonics */
    mdarray<double, 2> rotm(lmmax, lmmax);

    mdarray<double, 3> fsym(lmmax, nrmax, spl_atoms.local_size());
    fsym.zero();

    /* 1 / N_sym prefactor folded into the GEMM */
    double alpha = 1.0 / double(num_mag_sym());

    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        int pr = magnetic_group_symmetry(i).spg_op.proper;
        auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
        int isym = magnetic_group_symmetry(i).isym;
        SHT::rotation_matrix(lmax, eang, pr, rotm);

        for (int ia = 0; ia < num_atoms_; ia++) {
            int ja = sym_table_(ia, isym);
            auto location = spl_atoms.location(ja);
            if (location.rank == comm__.rank()) {
                /* accumulate rotm * f(ia) into the local slice of atom ja */
                linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha, rotm.at<CPU>(), rotm.ld(),
                                  frlm__.at<CPU>(0, 0, ia), frlm__.ld(), 1.0,
                                  fsym.at<CPU>(0, 0, location.local_index), fsym.ld());
            }
        }
    }
    /* collect the distributed result back into frlm__ on every rank */
    double* sbuf = spl_atoms.local_size() ? fsym.at<CPU>() : nullptr;
    comm__.allgather(sbuf, frlm__.at<CPU>(),
                     lmmax * nrmax * spl_atoms.global_offset(),
                     lmmax * nrmax * spl_atoms.local_size());
}

inline void Symmetry::symmetrize_vector_function(mdarray<double, 3>& vz_rlm__,
                                                 Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_vector_function_mt");

    int lmmax = (int)vz_rlm__.size(0);
    int nrmax = (int)vz_rlm__.size(1);

    splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());

    if (num_atoms_ != (int)vz_rlm__.size(2)) {
        TERMINATE("wrong number of atoms");
    }

    int lmax = Utils::lmax_by_lmmax(lmmax);

    mdarray<double, 2> rotm(lmmax, lmmax);

    mdarray<double, 3> fsym(lmmax, nrmax, spl_atoms.local_size());
    fsym.zero();

    double alpha = 1.0 / double(num_mag_sym());

    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        int pr = magnetic_group_symmetry(i).spg_op.proper;
        auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
        int isym = magnetic_group_symmetry(i).isym;
        auto S = magnetic_group_symmetry(i).spin_rotation;
        SHT::rotation_matrix(lmax, eang, pr, rotm);

        for (int ia = 0; ia < num_atoms_; ia++) {
            int ja = sym_table_(ia, isym);
            auto location = spl_atoms.location(ja);
            if (location.rank == comm__.rank()) {
                /* z-component only: scale by S(2,2) (collinear case) */
                linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha * S(2, 2), rotm.at<CPU>(), rotm.ld(),
                                  vz_rlm__.at<CPU>(0, 0, ia), vz_rlm__.ld(), 1.0,
                                  fsym.at<CPU>(0, 0, location.local_index), fsym.ld());
            }
        }
    }

    double* sbuf = spl_atoms.local_size() ? fsym.at<CPU>() : nullptr;
    comm__.allgather(sbuf, vz_rlm__.at<CPU>(),
                     lmmax * nrmax * spl_atoms.global_offset(),
                     lmmax * nrmax * spl_atoms.local_size());
}

inline void Symmetry::symmetrize_vector_function(mdarray<double, 3>& vx_rlm__,
                                                 mdarray<double, 3>& vy_rlm__,
                                                 mdarray<double, 3>& vz_rlm__,
                                                 Communicator const& comm__) const
{
    PROFILE("sirius::Symmetry::symmetrize_vector_function_mt");

    int lmmax = (int)vx_rlm__.size(0);
    int nrmax = (int)vx_rlm__.size(1);

    splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());

    int lmax = Utils::lmax_by_lmmax(lmmax);

    mdarray<double, 2> rotm(lmmax, lmmax);

    /* per-component accumulator, last dimension is the Cartesian component */
    mdarray<double, 4> v_sym(lmmax, nrmax, spl_atoms.local_size(), 3);
    v_sym.zero();

    mdarray<double, 3> vtmp(lmmax, nrmax, 3);

    double alpha = 1.0 / double(num_mag_sym());

    std::vector<mdarray<double, 3>*> vrlm({&vx_rlm__, &vy_rlm__, &vz_rlm__});

    for (int i = 0; i < num_mag_sym(); i++) {
        /* full space-group symmetry operation is {R|t} */
        int pr = magnetic_group_symmetry(i).spg_op.proper;
        auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
        int isym = magnetic_group_symmetry(i).isym;
        auto S = magnetic_group_symmetry(i).spin_rotation;
        SHT::rotation_matrix(lmax, eang, pr, rotm);

        for (int ia = 0; ia < num_atoms_; ia++) {
            int ja = sym_table_(ia, isym);
            auto location = spl_atoms.location(ja);
            if (location.rank == comm__.rank()) {
                /* first rotate each component in (lm) space ... */
                for (int k: {0, 1, 2}) {
                    linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha, rotm.at<CPU>(), rotm.ld(),
                                      vrlm[k]->at<CPU>(0, 0, ia), vrlm[k]->ld(), 0.0,
                                      vtmp.at<CPU>(0, 0, k), vtmp.ld());
                }
                /* ... then mix the components with the spin rotation S */
                #pragma omp parallel
                for (int k: {0, 1, 2}) {
                    for (int j: {0, 1, 2}) {
                        #pragma omp for
                        for (int ir = 0; ir < nrmax; ir++) {
                            for (int lm = 0; lm < lmmax; lm++) {
                                v_sym(lm, ir, location.local_index, k) += S(k, j) * vtmp(lm, ir, j);
                            }
                        }
                    }
                }
            }
        }
    }

    for (int k: {0, 1, 2}) {
        double* sbuf = spl_atoms.local_size() ? v_sym.at<CPU>(0, 0, 0, k) : nullptr;
        comm__.allgather(sbuf, vrlm[k]->at<CPU>(),
                         lmmax * nrmax * spl_atoms.global_offset(),
                         lmmax * nrmax * spl_atoms.local_size());
    }
}

}  // namespace

/** \page sym Symmetry
 *  \section section1 Definition of symmetry operation
 *
 *  SIRIUS uses Spglib to find the spacial symmetry operations. Spglib defines symmetry operation in fractional
 *  coordinates:
 *  \f[
 *      {\bf x'} = \{ {\bf R} | {\bf t} \} {\bf x} \equiv {\bf R}{\bf x} + {\bf t}
 *  \f]
 *  where \b R is the proper or improper rotation matrix with elements equal to -1,0,1 and determinant of 1
 *  (pure rotation) or -1 (rotoreflection) and \b t is the fractional translation, associated with the symmetry
 *  operation. The inverse of the symmetry operation is:
 *  \f[
 *      {\bf x} = \{ {\bf R} | {\bf t} \}^{-1} {\bf x'} = {\bf R}^{-1} ({\bf x'} - {\bf t}) =
 *          {\bf R}^{-1} {\bf x'} - {\bf R}^{-1} {\bf t}
 *  \f]
 *
 *  We will always use an \a active transformation (transformation of vectors or functions) and never a passive
 *  transformation (transformation of coordinate system). However one should remember definition of the function
 *  transformation:
 *  \f[
 *      \hat {\bf P} f({\bf r}) \equiv f(\hat {\bf P}^{-1} {\bf r})
 *  \f]
 *
 *  It is straightforward to get the rotation matrix in Cartesian coordinates. We know how the vector in Cartesian
 *  coordinates is obtained from the vector in fractional coordinates:
 *  \f[
 *      {\bf v} = {\bf L} {\bf x}
 *  \f]
 *  where \b L is the 3x3 matrix whose columns are the three lattice vectors. The backward transformation is simply
 *  \f[
 *      {\bf x} = {\bf L}^{-1} {\bf v}
 *  \f]
 *  Now we write rotation operation in fractional coordinates and apply the backward transformation to Cartesian
 *  coordinates:
 *  \f[
 *      {\bf x'} = {\bf R}{\bf x} \rightarrow {\bf L}^{-1} {\bf v'} = {\bf R} {\bf L}^{-1} {\bf v}
 *  \f]
 *  from which we derive the rotation operation in Cartesian coordinates:
 *  \f[
 *      {\bf v'} = {\bf L} {\bf R} {\bf L}^{-1} {\bf v}
 *  \f]
 */

#endif // __SYMMETRY_H__
/* effect.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
%  intensely near image edges and more intensely far from edges.  We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView *blur_view, *edge_view, *image_view;
  double normalize, **kernel;
  Image *blur_image, *edge_image, *gaussian_image;
  MagickBooleanType status;
  MagickOffsetType progress;
  size_t width;
  ssize_t w, y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* a (near) zero sigma is a no-op: return the unmodified clone */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge map drives the per-pixel kernel size below: strong
    edges select small kernels (little blur), flat areas select large ones.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices (w=0,2,4,...) are populated; each kernel[w] is a
    normalized (width-w) x (width-w) Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t j, k, u, v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    /* fold any normalization residue into the center tap */
    kernel[w][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      /* a kernel allocation failed mid-loop; release what was acquired */
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const Quantum *magick_restrict r;
    Quantum *magick_restrict q;
    ssize_t x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      const Quantum *magick_restrict p;
      ssize_t i;
      ssize_t center, j;

      /* map local edge strength to a kernel index: bright edge -> large j
         -> small (width-j) kernel; j is clamped and forced even */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double alpha, gamma, pixel;
        PixelChannel channel;
        PixelTrait blur_traits, traits;
        const double *magick_restrict k;
        const Quantum *magick_restrict pixels;
        ssize_t u;
        ssize_t v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.
We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView *sharp_view, *edge_view, *image_view;
  double normalize, **kernel;
  Image *sharp_image, *edge_image, *gaussian_image;
  MagickBooleanType status;
  MagickOffsetType progress;
  size_t width;
  ssize_t w, y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* a (near) zero sigma is a no-op: return the unmodified clone */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The edge map drives the per-pixel kernel size: strong edges select small
    kernels (intense sharpening), flat areas select large ones.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices are populated; each kernel[w] is a (width-w) x
    (width-w) negative Gaussian with a dominant positive center tap
    (a classic unsharp/sharpen kernel).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t j, k, u, v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    /* center tap outweighs the (negative) surround by a factor of two */
    kernel[w][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      /* a kernel allocation failed mid-loop; release what was acquired */
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    const Quantum *magick_restrict r;
    Quantum *magick_restrict q;
    ssize_t x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      const Quantum *magick_restrict p;
      ssize_t i;
      ssize_t center, j;

      /* map local edge strength to a kernel index; clamp and force even */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double alpha, gamma, pixel;
        PixelChannel channel;
        PixelTrait sharp_traits, traits;
        const double *magick_restrict k;
        const Quantum *magick_restrict pixels;
        ssize_t u;
        ssize_t v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.
Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l a t e r a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing % smoothing filter for images. 
It replaces the intensity of each pixel with
%  a weighted average of intensity values from nearby pixels.  This weight is
%  based on a Gaussian distribution.  The weights depend not only on Euclidean
%  distance of pixels, but also on the radiometric differences (e.g., range
%  differences, such as color intensity, depth distance, etc.).  This preserves
%  sharp edges.
%
%  The format of the BilateralBlurImage method is:
%
%      Image *BilateralBlurImage(const Image *image,const size_t width,
%        const size_t height,const double intensity_sigma,
%        const double spatial_sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the neighborhood in pixels.
%
%    o height: the height of the neighborhood in pixels.
%
%    o intensity_sigma: sigma in the intensity space.  A larger value means
%      that farther colors within the pixel neighborhood (see spatial_sigma)
%      will be mixed together, resulting in larger areas of semi-equal color.
%
%    o spatial_sigma: sigma in the coordinate space.  A larger value means that
%      farther pixels influence each other as long as their colors are close
%      enough (see intensity_sigma).  When the neighborhood diameter is greater
%      than zero, it specifies the neighborhood size regardless of
%      spatial_sigma.  Otherwise, the neighborhood diameter is proportional to
%      spatial_sigma.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double BlurDistance(const ssize_t x,const ssize_t y, const ssize_t u,const ssize_t v) { return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v))); } static inline double BlurGaussian(const double x,const double sigma) { return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))* PerceptibleReciprocal(Magick2PI*sigma*sigma)); } static double **DestroyBilateralThreadSet(const ssize_t number_threads, double **weights) { ssize_t i; assert(weights != (double **) NULL); for (i=0; i <= (ssize_t) number_threads; i++) if (weights[i] != (double *) NULL) weights[i]=(double *) RelinquishMagickMemory(weights[i]); weights=(double **) RelinquishMagickMemory(weights); return(weights); } static double **AcquireBilateralThreadSet(const size_t number_threads, const size_t width,const size_t height) { double **weights; ssize_t i; weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights)); if (weights == (double **) NULL) return((double **) NULL); (void) memset(weights,0,number_threads*sizeof(*weights)); for (i=0; i <= (ssize_t) number_threads; i++) { weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights)); if (weights[i] == (double *) NULL) return(DestroyBilateralThreadSet(number_threads,weights)); } return(weights); } MagickExport Image *BilateralBlurImage(const Image *image,const size_t width, const size_t height,const double intensity_sigma,const double spatial_sigma, ExceptionInfo *exception) { #define MaxIntensity (255) #define BilateralBlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double intensity_gaussian[2*(MaxIntensity+1)], *spatial_gaussian, **weights; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo mid; ssize_t number_threads, w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != 
(ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } number_threads=(size_t) GetMagickResourceLimit(ThreadResource); weights=AcquireBilateralThreadSet(number_threads,width,height); if (weights == (double **) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (w=(-MaxIntensity); w < MaxIntensity; w++) intensity_gaussian[w+MaxIntensity]=BlurGaussian((double) w,intensity_sigma); spatial_gaussian=weights[number_threads]; { ssize_t n, v; n=0; mid.x=(ssize_t) (width/2L); mid.y=(ssize_t) (height/2L); for (v=0; v < (ssize_t) height; v++) { ssize_t u; for (u=0; u < (ssize_t) width; u++) spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y), spatial_sigma); } } /* Bilateral blur image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; const Quantum *magick_restrict p, *magick_restrict r; ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. 
*/ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height, exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)* mid.x; n=0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double alpha, beta; r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]*alpha*beta; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); } q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BilateralBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); weights=DestroyBilateralThreadSet(number_threads,weights); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Take the OpenCL-accelerated path when the runtime supports it.
  */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /*
    Convolution is one iteration of convolve morphology with this kernel.
  */
  convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull(): one pass of the complementary hulling step used by DespeckleImage().
  f and g are (columns+2) x (rows+2) buffers (one-pixel border on every side);
  f holds the current channel values and g receives the hulled result.
  (x_offset,y_offset) selects the neighbor direction, polarity selects whether
  pixels are raised (> 0) toward brighter neighbors or lowered toward darker
  ones.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* skip the one-pixel border row: p/q point at the first interior pixel */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r is the neighbor of p in the (x_offset,y_offset) direction */
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /* index of the first interior pixel of row y within the padded buffer */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* second pass: compare against both the forward (r) and backward (s)
     neighbors of the intermediate result in g, writing back into f */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /* neighbor directions for the 4 hulling axes (S, E, SE, SW) */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both working buffers carry a one-pixel border,
    hence the +2 in each dimension.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one pixel channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* copy this channel into the padded working buffer */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* raise then lower along each of the 4 axes (Crimmins hulling) */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* write the filtered channel back out */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,
        despeckle_image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* build a width x width Laplacian-style kernel by hand */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* all taps are -1; the center tap (i/2 with i == width*height after the
     loop) is set so the kernel sums to zero */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Gaussian-weighted taps, signed by quadrant (-8 above/left of center,
    +8 below/right); everything off the anti-diagonal (u != k) is zeroed so
    the light appears to come from one direction.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* normalize so the taps sum to 1 (PerceptibleReciprocal avoids div-by-0) */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_info;

  /*
    Validate the input image and exception structure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a "gaussian:<radius>x<sigma>" kernel specification and let the
    kernel parser generate the Gaussian coefficients.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_info=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Convolve with the Gaussian kernel, then release it.
  */
  blur_image=ConvolveImage(image,gaussian_info,exception);
  gaussian_info=DestroyKernelInfo(gaussian_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma(): Rec. 709 luma of a pixel given as an array of raw channel
  values indexed through the image's channel map.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /* pre-smooth; the quadrant statistics are taken from the blurred image */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      /* examine the 4 width x width quadrants around (x,y) and keep the one
         with the smallest luma variance */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* per-channel mean over the quadrant */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* luma variance of the quadrant against its mean */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* sample the winning quadrant at its center */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanline,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanline_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /* radius is a percentage of the largest dimension; width is the one-sided
     triangular-filter half width, and every scan line is padded by width on
     both ends */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* one scan-line buffer per OpenMP thread */
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer: one luma row per image row, padded by width
    columns on each side.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur image luma down each column into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* gather the padded column of luma values */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float) GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* triangular-weighted sum over the 2*width window */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage along each row, then apply the unsharp
    style contrast adjustment to the RGB channels.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        /* same triangular-weighted window as the vertical pass */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /* scale each updatable RGB channel by (src + strength*(src-blur))/src */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  ssize_t
    i;

  /*
    Generate a 1-D convolution kernel: one-sided Gaussian taps, normalized to
    sum to 1.  Returns NULL on allocation failure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* precompute the per-tap pixel offsets along the motion direction */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (w=0; w < (ssize_t) width; w++)
  {
    offset[w].x=CastDoubleToLong(ceil((double) (w*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[w].y=CastDoubleToLong(ceil((double) (w*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        MagickRealType
          *magick_restrict k;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /* no alpha blending: plain weighted sum along the motion path */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* alpha-weighted sum, renormalized by the accumulated alpha weight */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation
% applied with varying parameters.  This may be helpful pin-pointing an
% appropriate parameter for a particular image processing operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Seed the per-tile parameters; each iteration below perturbs one of these
    (percentage, radius, sigma, degrees, gamma, threshold, colors) before
    applying the selected operation, so the nine tiles sweep the parameter.
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    /*
      Build one thumbnail tile per iteration; 'preview_image' receives the
      processed result (or NULL on failure, which ends the loop below).
    */
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /*
          Center tile: keep the unprocessed thumbnail, marked with a gray
          matte color so it stands out in the montage.
        */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /*
          Apply despeckle i times: i-1 passes fold back into 'thumbnail',
          the final pass produces 'preview_image'.
        */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): there is no case 4, so tile 4 falls to 'default'
          and overwrites thumbnail->magick instead of picking a noise
          type -- looks like an intentional gap but confirm upstream.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to 'thumbnail', not to the
          'preview_image' clone that gets tiled -- the displayed tile is the
          unthresholded clone.  Possibly a bug; confirm against upstream.
        */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        /*
          NOTE(review): label says "charcoal" for the oil-paint preview --
          looks like a copy/paste slip from CharcoalDrawingPreview; confirm
          before changing the user-visible label.
        */
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        /*
          Round-trip the tile through a temporary JPEG file at the current
          quality so the tile shows real compression artifacts, then label
          it with the resulting blob size.
        */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Advance the parameter sweep for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    preview_image->alpha_trait=UndefinedPixelTrait;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      /*
        NOTE(review): the guard tests image->directory but frees
        montage_image->directory -- looks like it should test the montage
        image's own directory; confirm against upstream before changing.
      */
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  size_t
    n;

  ssize_t
    w,
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Precompute n sample angles spanning [-angle/2,+angle/2]; n grows with
    both the requested angle and the image diagonal so sampling stays dense.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(double) (n-1)/2.0;
  for (w=0; w < (ssize_t) n; w++)
  {
    cos_theta[w]=cos((double) (theta*w-offset));
    sin_theta[w]=sin((double) (theta*w-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center rotate through a shorter arc, so sample the
        angle table more sparsely there (larger step), clamped to [1,n-1].
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        /*
          Unweighted average when there is no alpha channel (or for the
          alpha channel itself); otherwise alpha-weighted average below.
        */
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) ==
             UndefinedPixelTrait) || (channel == AlphaPixelChannel))
          {
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  size_t
    width;

  ssize_t
    center,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  {
    /*
      Fill a width x width 2-D Gaussian kernel, row-major, centered at
      (j,j) with j = (width-1)/2.
    */
    ssize_t
      i,
      j,
      v;

    j=(ssize_t) (width-1)/2;
    i=0;
    for (v=(-j); v <= j; v++)
    {
      ssize_t
        u;

      for (u=(-j); u <= j; u++)
        kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
    }
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel values, one row per log line, for debugging.
      */
      char
        format[MagickPathExtent],
        *message;

      const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    The contrast test below reads intensity from a grayscale copy of the
    input so the per-pixel intensity lookups are cheap.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* Offset (in Quantum) from the padded region origin to the center pixel. */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict l,
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        /*
          Two convolution paths: plain weights when the channel does not
          blend with alpha, alpha-weighted otherwise.  Only neighbors whose
          intensity differs from the center pixel by less than 'threshold'
          contribute; 'gamma' renormalizes over the included weights.
        */
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor passed the contrast test: keep the pixel. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect. You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation:  Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    NOTE(review): 'linear_image' is a plain clone -- no colorspace transform
    is applied here despite the name; confirm intent against upstream.
  */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (previous, current, next) with 1-pixel margins. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine the surface normal and compute shading: x/y gradients are
        3x3 Prewitt-style differences of the clamped pixel intensities.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the vertical component only */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray mode: output the shade value itself, not a modulation. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* build a width x width sharpening kernel centered on the middle pixel */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /* negative Gaussian surround; the center weight is assigned below */
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* center weight makes the kernel sum positive, i.e. a sharpening kernel */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  /* scale so the kernel weights sum to ~1 (PerceptibleReciprocal avoids 0) */
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: interpolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* only parallelize when the RNG is not seeded (key == ~0UL means
     unseeded/non-reproducible, so per-thread RNG streams are acceptable) */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* sample a uniformly random offset in [-width/2,width/2) around (x,y) */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p   M a s k   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens
%  one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use
%  a radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double amount,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
/*
  This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  /* the "unsharp" image starts as a Gaussian-blurred copy of the input */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* difference between the original and the blurred pixel; add a
           gain-scaled fraction back only when it exceeds the threshold */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
GB_unop__carg_fp64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__carg_fp64_fc64)
// op(A') function:  GB (_unop_tran__carg_fp64_fc64)

// C type:   double
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = (aij)
// unaryop:  cij = carg (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex argument (phase angle) of x
#define GB_OP(z, x) \
    z = carg (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ;          \
    Cx [pC] = carg (z) ;            \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CARG || GxB_NO_FP64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__carg_fp64_fc64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: apply the op to all anz entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = carg (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = carg (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__carg_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared; GB_unop_transpose.c uses the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ccl_core.c
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
#include <string.h>

#include <gsl/gsl_errno.h>
#include <gsl/gsl_odeiv.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_interp2d.h>
#include <gsl/gsl_spline2d.h>
#include <gsl/gsl_integration.h>

#include "ccl.h"

//
// Macros for replacing relative paths
#define EXPAND_STR(s) STRING(s)
#define STRING(s) #s

// Default choice of backend implementation for each physics computation.
const ccl_configuration default_config = {
  ccl_boltzmann_class, ccl_halofit, ccl_nobaryons, ccl_tinker10,
  ccl_duffy2008, ccl_emu_strict};

//Precision parameters
/**
 * Default relative precision if not otherwise specified
 */
#define GSL_EPSREL 1E-4

/**
 * Default number of iterations for integration and root-finding if not
 * otherwise specified
 */
#define GSL_N_ITERATION 1000

/**
 * Default number of Gauss-Kronrod points in QAG integration if not otherwise
 * specified
 */
#define GSL_INTEGRATION_GAUSS_KRONROD_POINTS GSL_INTEG_GAUSS41

/**
 * Relative precision in sigma_R calculations
 */
#define GSL_EPSREL_SIGMAR 1E-5

/**
 * Relative precision in k_NL calculations
 */
#define GSL_EPSREL_KNL 1E-5

/**
 * Relative precision in distance calculations
 */
#define GSL_EPSREL_DIST 1E-6

/**
 * Relative precision in growth calculations
 */
#define GSL_EPSREL_GROWTH 1E-6

/**
 * Relative precision in dNdz calculations
 */
#define GSL_EPSREL_DNDZ 1E-6

// Default GSL tolerances and iteration limits; copied into every new
// cosmology by ccl_cosmology_create().  Field order must match ccl_gsl_params.
const ccl_gsl_params default_gsl_params = {
  GSL_N_ITERATION,                     // N_ITERATION
  GSL_INTEGRATION_GAUSS_KRONROD_POINTS,// INTEGRATION_GAUSS_KRONROD_POINTS
  GSL_EPSREL,                          // INTEGRATION_EPSREL
  GSL_INTEGRATION_GAUSS_KRONROD_POINTS,// INTEGRATION_LIMBER_GAUSS_KRONROD_POINTS
  GSL_EPSREL,                          // INTEGRATION_LIMBER_EPSREL
  GSL_EPSREL_DIST,                     // INTEGRATION_DISTANCE_EPSREL
  GSL_EPSREL_SIGMAR,                   // INTEGRATION_SIGMAR_EPSREL
  GSL_EPSREL_KNL,                      // INTEGRATION_KNL_EPSREL
  GSL_EPSREL,                          // ROOT_EPSREL
  GSL_N_ITERATION,                     // ROOT_N_ITERATION
  GSL_EPSREL_GROWTH,                   // ODE_GROWTH_EPSREL
  1E-6,                                // EPS_SCALEFAC_GROWTH
  1E7,                                 // HM_MMIN
  1E17,                                // HM_MMAX
  0.0,                                 // HM_EPSABS
  1E-4,                                // HM_EPSREL
  1000,                                // HM_LIMIT
  GSL_INTEG_GAUSS41                    // HM_INT_METHOD
};

#undef GSL_EPSREL
#undef GSL_N_ITERATION
#undef GSL_INTEGRATION_GAUSS_KRONROD_POINTS
#undef GSL_EPSREL_SIGMAR
#undef GSL_EPSREL_KNL
#undef GSL_EPSREL_DIST
#undef GSL_EPSREL_GROWTH
#undef GSL_EPSREL_DNDZ

// Default spline resolutions and ranges; copied into every new cosmology.
// Field order must match ccl_spline_params.
const ccl_spline_params default_spline_params = {

  // scale factor spline params
  250,    // A_SPLINE_NA
  0.1,    // A_SPLINE_MIN
  0.01,   // A_SPLINE_MINLOG_PK
  0.1,    // A_SPLINE_MIN_PK,
  0.01,   // A_SPLINE_MINLOG_SM,
  0.1,    // A_SPLINE_MIN_SM,
  1.0,    // A_SPLINE_MAX,
  0.0001, // A_SPLINE_MINLOG,
  250,    // A_SPLINE_NLOG,

  // mass splines
  0.025,  // LOGM_SPLINE_DELTA
  50,     // LOGM_SPLINE_NM
  6,      // LOGM_SPLINE_MIN
  17,     // LOGM_SPLINE_MAX

  // PS a and k spline
  13,     // A_SPLINE_NA_SM
  6,      // A_SPLINE_NLOG_SM
  40,     // A_SPLINE_NA_PK
  11,     // A_SPLINE_NLOG_PK

  // k-splines and integrals
  50,     // K_MAX_SPLINE
  1E3,    // K_MAX
  5E-5,   // K_MIN
  0.025,  // DLOGK_INTEGRATION
  167,    // N_K
  100000, // N_K_3DCOR

  // correlation function parameters
  0.01,   // ELL_MIN_CORR
  60000,  // ELL_MAX_CORR
  5000,   // N_ELL_CORR

  //Spline types: cannot appear in a static initializer (GSL interp types are
  //runtime symbols), so they are filled in by ccl_cosmology_create().
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
  NULL
};

ccl_physical_constants ccl_constants = {
  /**
   * Lightspeed / H0 in units of Mpc/h (from CODATA 2014)
   */
  2997.92458,

  /**
   * Newton's gravitational constant in units of m^3/Kg/s^2
   */
  //6.6738e-11,  /(from PDG 2013) in m^3/Kg/s^2
  //6.67428e-11, // CLASS VALUE
  6.67408e-11,  // from CODATA 2014

  /**
   * Solar mass in units of kg (from GSL)
   */
  //GSL_CONST_MKSA_SOLAR_MASS,
  //1.9885e30, //(from PDG 2015) in Kg
  1.9884754153381438E+30, //from IAU 2015

  /**
   * Mpc to meters (from PDG 2016 and using M_PI)
   */
  3.085677581491367399198952281E+22,

  /**
   * pc to meters (from PDG 2016 and using M_PI)
   */
  3.085677581491367399198952281E+16,

  /**
   * Rho critical in units of M_sun/h / (Mpc/h)^3
   */
  ((3*100*100)/(8*M_PI*6.67408e-11)) *
    (1000*1000*3.085677581491367399198952281E+22/1.9884754153381438E+30),

  /**
   * Boltzmann constant in units of J/K
   */
  //GSL_CONST_MKSA_BOLTZMANN,
  1.38064852E-23, //from CODATA 2014

  /**
   * Stefan-Boltzmann constant in units of kg/s^3 / K^4
   */
  //GSL_CONST_MKSA_STEFAN_BOLTZMANN_CONSTANT,
  5.670367E-8, //from CODATA 2014

  /**
   * Planck's constant in units kg m^2 / s
   */
  //GSL_CONST_MKSA_PLANCKS_CONSTANT_H,
  6.626070040E-34, //from CODATA 2014

  /**
   * The speed of light in m/s
   */
  //GSL_CONST_MKSA_SPEED_OF_LIGHT,
  299792458.0, //from CODATA 2014

  /**
   * Electron volt to Joules conversion
   */
  //GSL_CONST_MKSA_ELECTRON_VOLT,
  1.6021766208e-19, //from CODATA 2014

  /**
   * Temperature of the CMB in K
   */
  2.725,
  //2.7255, // CLASS value

  /**
   * T_ncdm, as taken from CLASS, explanatory.ini
   */
  0.71611,

  /**
   * neutrino mass splitting differences
   * See Lesgourgues and Pastor, 2012 for these values.
   * Adv. High Energy Phys. 2012 (2012) 608515,
   * arXiv:1212.6154, page 13
   */
  7.62E-5,
  2.55E-3,
  -2.43E-3
};

/* ------- ROUTINE: ccl_cosmology_create ------
INPUTS: ccl_parameters params
        ccl_configuration config
TASK: creates the ccl_cosmology struct and passes some values to it
DEFINITIONS:
chi: comoving distance [Mpc]
growth: growth function (density)
fgrowth: logarithmic derivative of the growth (density) (dlnD/da?)
E: E(a)=H(a)/H0
growth0: growth at z=0, defined to be 1
sigma: ?
p_lnl: nonlinear matter power spectrum at z=0?
computed_distances, computed_growth,
computed_power, computed_sigma: store status of the computations
*/
ccl_cosmology * ccl_cosmology_create(ccl_parameters params, ccl_configuration config)
{
  // NOTE(review): malloc result is not checked; an allocation failure would
  // crash on the first dereference below -- TODO confirm intended OOM policy.
  ccl_cosmology * cosmo = malloc(sizeof(ccl_cosmology));
  cosmo->params = params;
  cosmo->config = config;
  cosmo->gsl_params = default_gsl_params;
  cosmo->spline_params = default_spline_params;
  // The GSL interp types are runtime symbols, so they cannot be part of the
  // static default_spline_params initializer; assign them here instead.
  cosmo->spline_params.A_SPLINE_TYPE = gsl_interp_akima;
  cosmo->spline_params.K_SPLINE_TYPE = gsl_interp_akima;
  cosmo->spline_params.M_SPLINE_TYPE = gsl_interp_akima;
  cosmo->spline_params.D_SPLINE_TYPE = gsl_interp_akima;
  cosmo->spline_params.PNL_SPLINE_TYPE = gsl_interp2d_bicubic;
  cosmo->spline_params.PLIN_SPLINE_TYPE = gsl_interp2d_bicubic;
  cosmo->spline_params.CORR_SPLINE_TYPE = gsl_interp_akima;

  // Splines are allocated lazily by the compute routines; start empty.
  cosmo->data.chi = NULL;
  cosmo->data.growth = NULL;
  cosmo->data.fgrowth = NULL;
  cosmo->data.E = NULL;
  cosmo->data.growth0 = 1.;
  cosmo->data.achi = NULL;

  cosmo->data.logsigma = NULL;

  cosmo->data.rsd_splines[0] = NULL;
  cosmo->data.rsd_splines[1] = NULL;
  cosmo->data.rsd_splines[2] = NULL;

  cosmo->computed_distances = false;
  cosmo->computed_growth = false;
  cosmo->computed_sigma = false;
  cosmo->status = 0;
  ccl_cosmology_set_status_message(cosmo, "");

  if(cosmo->spline_params.A_SPLINE_MAX !=1.) {
    cosmo->status = CCL_ERROR_SPLINE;
    ccl_cosmology_set_status_message(cosmo,
                                     "ccl_core.c: ccl_cosmology_create(): "
                                     "A_SPLINE_MAX needs to be 1.\n");
  }

  return cosmo;
}

/* ------ ROUTINE: ccl_parameters_fill_initial -------
INPUT: ccl_parameters: params
TASK: fill parameters not set by ccl_parameters_create with some initial values
DEFINITIONS:
Omega_g = (Omega_g*h^2)/h^2 is the radiation parameter; "g" is for photons, as in CLASS
T_CMB: CMB temperature in Kelvin
Omega_l: Lambda
A_s: amplitude of the primordial PS, enforced here to initially set to NaN
sigma8: variance in 8 Mpc/h spheres for normalization of matter PS, enforced here to initially set to NaN
z_star: recombination redshift
*/
void ccl_parameters_fill_initial(ccl_parameters * params, int *status)
{
  // Fixed radiation parameters
  // Omega_g * h**2 is known from T_CMB
  params->T_CMB = ccl_constants.T_CMB;
  // photon energy density, kg / m^3
  double rho_g = 4. * ccl_constants.STBOLTZ / pow(ccl_constants.CLIGHT, 3) * pow(params->T_CMB, 4);
  // critical density, kg / m^3
  double rho_crit = ccl_constants.RHO_CRITICAL * ccl_constants.SOLAR_MASS/pow(ccl_constants.MPC_TO_METER, 3) * pow(params->h, 2);
  params->Omega_g = rho_g/rho_crit;

  // Get the N_nu_rel from Neff and N_nu_mass
  params->N_nu_rel = params->Neff - params->N_nu_mass * pow(ccl_constants.TNCDM, 4) / pow(4./11.,4./3.);

  // Temperature of the relativistic neutrinos in K
  double T_nu= (params->T_CMB) * pow(4./11.,1./3.);
  // relativistic-neutrino energy density, kg / m^3 (7/8 fermion factor)
  double rho_nu_rel = params->N_nu_rel* 7.0/8.0 * 4. * ccl_constants.STBOLTZ / pow(ccl_constants.CLIGHT, 3) * pow(T_nu, 4);
  params-> Omega_nu_rel = rho_nu_rel/rho_crit;

  // If non-relativistic neutrinos are present, calculate the phase_space integral.
  if((params->N_nu_mass)>0) {
    params->Omega_nu_mass = ccl_Omeganuh2(
      1.0, params->N_nu_mass, params->m_nu, params->T_CMB, status) /
      ((params->h)*(params->h));
  }
  else{
    params->Omega_nu_mass = 0.;
  }

  params->Omega_m = params->Omega_b + params-> Omega_c + params->Omega_nu_mass;
  // Omega_l closes the budget (flatness up to explicit curvature Omega_k)
  params->Omega_l = 1.0 - params->Omega_m - params->Omega_g - params->Omega_nu_rel - params->Omega_k;
  // Initially undetermined parameters - set to nan to trigger
  // problems if they are mistakenly used.
  if (isfinite(params->A_s)) {params->sigma8 = NAN;}
  if (isfinite(params->sigma8)) {params->A_s = NAN;}
  params->z_star = NAN;

  // Curvature sign: 0 = flat (|Omega_k| < 1e-6), -1 = open, +1 = closed.
  if(fabs(params->Omega_k)<1E-6)
    params->k_sign=0;
  else if(params->Omega_k>0)
    params->k_sign=-1;
  else
    params->k_sign=1;
  params->sqrtk=sqrt(fabs(params->Omega_k))*params->h/ccl_constants.CLIGHT_HMPC;
}

/* ------ ROUTINE: ccl_parameters_create -------
INPUT: numbers for the basic cosmological parameters needed by CCL
TASK: fill params with some initial values provided by the user
DEFINITIONS:
Omega_c: cold dark matter
Omega_b: baryons
Omega_m: matter
Omega_k: curvature
little omega_x means Omega_x*h^2
Neff : Effective number of neutrino species
mnu : Pointer to either sum of neutrino masses or list of three masses.
mnu_type : how the neutrino mass(es) should be treated
w0: Dark energy eq of state parameter
wa: Dark energy eq of state parameter, time variation
H0: Hubble's constant in km/s/Mpc.
h: Hubble's constant divided by (100 km/s/Mpc).
A_s: amplitude of the primordial PS n_s: index of the primordial PS */ ccl_parameters ccl_parameters_create(double Omega_c, double Omega_b, double Omega_k, double Neff, double* mnu, int n_mnu, double w0, double wa, double h, double norm_pk, double n_s, double bcm_log10Mc, double bcm_etab, double bcm_ks, double mu_0, double sigma_0, int nz_mgrowth, double *zarr_mgrowth, double *dfarr_mgrowth, int *status) { #ifndef USE_GSL_ERROR gsl_set_error_handler_off(); #endif ccl_parameters params; // Initialize params params.m_nu = NULL; params.z_mgrowth=NULL; params.df_mgrowth=NULL; params.sigma8 = NAN; params.A_s = NAN; params.Omega_c = Omega_c; params.Omega_b = Omega_b; params.Omega_k = Omega_k; params.Neff = Neff; params.m_nu = malloc(n_mnu*sizeof(double)); params.sum_nu_masses = 0.; for(int i = 0; i<n_mnu; i=i+1){ params.m_nu[i] = mnu[i]; params.sum_nu_masses = params.sum_nu_masses + mnu[i]; } if(params.sum_nu_masses<1e-15){ params.N_nu_mass = 0; }else{ params.N_nu_mass = n_mnu; } // Dark Energy params.w0 = w0; params.wa = wa; // Hubble parameters params.h = h; params.H0 = h*100; // Primordial power spectra if(norm_pk<1E-5) params.A_s=norm_pk; else params.sigma8=norm_pk; params.n_s = n_s; //Baryonic params if(bcm_log10Mc<0) params.bcm_log10Mc=log10(1.2e14); else params.bcm_log10Mc=bcm_log10Mc; if(bcm_etab<0) params.bcm_etab=0.5; else params.bcm_etab=bcm_etab; if(bcm_ks<0) params.bcm_ks=55.0; else params.bcm_ks=bcm_ks; // Params of the mu / Sigma parameterisation of MG params.mu_0 = mu_0; params.sigma_0 = sigma_0; // Set remaining standard and easily derived parameters ccl_parameters_fill_initial(&params, status); //Trigger modified growth function if nz>0 if(nz_mgrowth>0) { params.has_mgrowth=true; params.nz_mgrowth=nz_mgrowth; params.z_mgrowth=malloc(params.nz_mgrowth*sizeof(double)); params.df_mgrowth=malloc(params.nz_mgrowth*sizeof(double)); memcpy(params.z_mgrowth,zarr_mgrowth,params.nz_mgrowth*sizeof(double)); 
memcpy(params.df_mgrowth,dfarr_mgrowth,params.nz_mgrowth*sizeof(double)); } else { params.has_mgrowth=false; params.nz_mgrowth=0; params.z_mgrowth=NULL; params.df_mgrowth=NULL; } return params; } /** * Write a cosmology parameters object to a file in yaml format. * @param cosmo Cosmological parameters * @param f FILE* pointer opened for reading * @return void */ void ccl_parameters_write_yaml(ccl_parameters * params, const char * filename, int *status) { FILE * f = fopen(filename, "w"); if (!f){ *status = CCL_ERROR_FILE_WRITE; return; } #define WRITE_DOUBLE(name) fprintf(f, #name ": %le\n",params->name) #define WRITE_INT(name) fprintf(f, #name ": %d\n",params->name) // Densities: CDM, baryons, total matter, curvature WRITE_DOUBLE(Omega_c); WRITE_DOUBLE(Omega_b); WRITE_DOUBLE(Omega_m); WRITE_DOUBLE(Omega_k); WRITE_INT(k_sign); // Dark Energy WRITE_DOUBLE(w0); WRITE_DOUBLE(wa); // Hubble parameters WRITE_DOUBLE(H0); WRITE_DOUBLE(h); // Neutrino properties WRITE_DOUBLE(Neff); WRITE_INT(N_nu_mass); WRITE_DOUBLE(N_nu_rel); if (params->N_nu_mass>0){ fprintf(f, "m_nu: ["); for (int i=0; i<params->N_nu_mass; i++){ fprintf(f, "%le, ", params->m_nu[i]); } fprintf(f, "]\n"); } WRITE_DOUBLE(sum_nu_masses); WRITE_DOUBLE(Omega_nu_mass); WRITE_DOUBLE(Omega_nu_rel); // Primordial power spectra WRITE_DOUBLE(A_s); WRITE_DOUBLE(n_s); // Radiation parameters WRITE_DOUBLE(Omega_g); WRITE_DOUBLE(T_CMB); // BCM baryonic model parameters WRITE_DOUBLE(bcm_log10Mc); WRITE_DOUBLE(bcm_etab); WRITE_DOUBLE(bcm_ks); // Modified gravity parameters WRITE_DOUBLE(mu_0); WRITE_DOUBLE(sigma_0); // Derived parameters WRITE_DOUBLE(sigma8); WRITE_DOUBLE(Omega_l); WRITE_DOUBLE(z_star); WRITE_INT(has_mgrowth); WRITE_INT(nz_mgrowth); if (params->has_mgrowth){ fprintf(f, "z_mgrowth: ["); for (int i=0; i<params->nz_mgrowth; i++){ fprintf(f, "%le, ", params->z_mgrowth[i]); } fprintf(f, "]\n"); fprintf(f, "df_mgrowth: ["); for (int i=0; i<params->nz_mgrowth; i++){ fprintf(f, "%le, ", params->df_mgrowth[i]); } 
fprintf(f, "]\n"); } #undef WRITE_DOUBLE #undef WRITE_INT fclose(f); } /** * Write a cosmology parameters object to a file in yaml format. * @param cosmo Cosmological parameters * @param f FILE* pointer opened for reading * @return void */ ccl_parameters ccl_parameters_read_yaml(const char * filename, int *status) { FILE * f = fopen(filename, "r"); if (!f) { *status = CCL_ERROR_FILE_READ; ccl_parameters bad_params; ccl_raise_warning(CCL_ERROR_FILE_READ, "ccl_core.c: ccl_parameters_read_yaml(): " "Failed to read parameters from file."); return bad_params; } #define READ_DOUBLE(name) double name; *status |= (0==fscanf(f, #name ": %le\n",&name)); #define READ_INT(name) int name; *status |= (0==fscanf(f, #name ": %d\n",&name)) // Densities: CDM, baryons, total matter, curvature READ_DOUBLE(Omega_c); READ_DOUBLE(Omega_b); READ_DOUBLE(Omega_m); READ_DOUBLE(Omega_k); READ_INT(k_sign); // Dark Energy READ_DOUBLE(w0); READ_DOUBLE(wa); // Hubble parameters READ_DOUBLE(H0); READ_DOUBLE(h); // Neutrino properties READ_DOUBLE(Neff); READ_INT(N_nu_mass); READ_DOUBLE(N_nu_rel); double mnu[3] = {0.0, 0.0, 0.0}; if (N_nu_mass>0){ *status |= (0==fscanf(f, "m_nu: [")); for (int i=0; i<N_nu_mass; i++){ *status |= (0==fscanf(f, "%le, ", mnu+i)); } *status |= (0==fscanf(f, "]\n")); } READ_DOUBLE(sum_nu_masses); READ_DOUBLE(Omega_nu_mass); READ_DOUBLE(Omega_nu_rel); // Primordial power spectra READ_DOUBLE(A_s); READ_DOUBLE(n_s); // Radiation parameters READ_DOUBLE(Omega_g); READ_DOUBLE(T_CMB); // BCM baryonic model parameters READ_DOUBLE(bcm_log10Mc); READ_DOUBLE(bcm_etab); READ_DOUBLE(bcm_ks); // Modified gravity parameters READ_DOUBLE(mu_0); READ_DOUBLE(sigma_0); // Derived parameters READ_DOUBLE(sigma8); READ_DOUBLE(Omega_l); READ_DOUBLE(z_star); READ_INT(has_mgrowth); READ_INT(nz_mgrowth); double *z_mgrowth; double *df_mgrowth; if (has_mgrowth){ z_mgrowth = malloc(nz_mgrowth*sizeof(double)); df_mgrowth = malloc(nz_mgrowth*sizeof(double)); *status |= (0==fscanf(f, "z_mgrowth: [")); 
for (int i=0; i<nz_mgrowth; i++){ *status |= (0==fscanf(f, "%le, ", z_mgrowth+i)); } *status |= (0==fscanf(f, "]\n")); *status |= (0==fscanf(f, "df_mgrowth: [")); for (int i=0; i<nz_mgrowth; i++){ *status |= (0==fscanf(f, "%le, ", df_mgrowth+i)); } *status |= (0==fscanf(f, "]\n")); } else{ z_mgrowth = NULL; df_mgrowth = NULL; } #undef READ_DOUBLE #undef READ_INT fclose(f); if (*status) { ccl_raise_warning( *status, "ccl_core.c: ccl_parameters_read_yaml():" "Structure of YAML file incorrect: %s", filename); } double norm_pk; if (isnan(A_s)){ norm_pk = sigma8; } else{ norm_pk = A_s; } ccl_parameters params = ccl_parameters_create( Omega_c, Omega_b, Omega_k, Neff, mnu, N_nu_mass, w0, wa, h, norm_pk, n_s, bcm_log10Mc, bcm_etab, bcm_ks, mu_0, sigma_0, nz_mgrowth, z_mgrowth, df_mgrowth, status); if(z_mgrowth) free(z_mgrowth); if (df_mgrowth) free(df_mgrowth); return params; } /* ------- ROUTINE: ccl_data_free -------- INPUT: ccl_data TASK: free the input data */ void ccl_data_free(ccl_data * data) { //We cannot assume that all of these have been allocated //TODO: it would actually make more sense to do this within ccl_cosmology_free, //where we could make use of the flags "computed_distances" etc. to figure out //what to free up gsl_spline_free(data->chi); gsl_spline_free(data->growth); gsl_spline_free(data->fgrowth); gsl_spline_free(data->E); gsl_spline_free(data->achi); gsl_spline2d_free(data->logsigma); ccl_f1d_t_free(data->rsd_splines[0]); ccl_f1d_t_free(data->rsd_splines[1]); ccl_f1d_t_free(data->rsd_splines[2]); } /* ------- ROUTINE: ccl_cosmology_set_status_message -------- INPUT: ccl_cosmology struct, status_string TASK: set the status message safely. */ void ccl_cosmology_set_status_message(ccl_cosmology * cosmo, const char * message, ...) { const int trunc = 480; /* must be < 500 - 4 */ va_list va; va_start(va, message); #pragma omp critical { vsnprintf(cosmo->status_message, trunc, message, va); /* if truncation happens, message[trunc - 1] is not NULL, ... 
will show up. */ strcpy(&cosmo->status_message[trunc], "..."); } va_end(va); } /* ------- ROUTINE: ccl_parameters_free -------- INPUT: ccl_parameters struct TASK: free allocated quantities in the parameters struct */ void ccl_parameters_free(ccl_parameters * params) { if (params->m_nu != NULL){ free(params->m_nu); params->m_nu = NULL; } if (params->z_mgrowth != NULL){ free(params->z_mgrowth); params->z_mgrowth = NULL; } if (params->df_mgrowth != NULL){ free(params->df_mgrowth); params->df_mgrowth = NULL; } } /* ------- ROUTINE: ccl_cosmology_free -------- INPUT: ccl_cosmology struct TASK: free the input data and the cosmology struct */ void ccl_cosmology_free(ccl_cosmology * cosmo) { if (cosmo != NULL) ccl_data_free(&cosmo->data); free(cosmo); } int ccl_get_pk_spline_na(ccl_cosmology *cosmo) { return cosmo->spline_params.A_SPLINE_NA_PK + cosmo->spline_params.A_SPLINE_NLOG_PK - 1; } void ccl_get_pk_spline_a_array(ccl_cosmology *cosmo,int ndout,double* doutput,int *status) { double *d = NULL; if (ndout != ccl_get_pk_spline_na(cosmo)) *status = CCL_ERROR_INCONSISTENT; if (*status == 0) { d = ccl_linlog_spacing(cosmo->spline_params.A_SPLINE_MINLOG_PK, cosmo->spline_params.A_SPLINE_MIN_PK, cosmo->spline_params.A_SPLINE_MAX, cosmo->spline_params.A_SPLINE_NLOG_PK, cosmo->spline_params.A_SPLINE_NA_PK); if (d == NULL) *status = CCL_ERROR_MEMORY; } if(*status==0) memcpy(doutput, d, ndout*sizeof(double)); free(d); } int ccl_get_pk_spline_nk(ccl_cosmology *cosmo) { double ndecades = log10(cosmo->spline_params.K_MAX) - log10(cosmo->spline_params.K_MIN); return (int)ceil(ndecades*cosmo->spline_params.N_K); } void ccl_get_pk_spline_lk_array(ccl_cosmology *cosmo,int ndout,double* doutput,int *status) { double *d = NULL; if (ndout != ccl_get_pk_spline_nk(cosmo)) *status = CCL_ERROR_INCONSISTENT; if (*status == 0) { d = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, ndout); if (d == NULL) *status = CCL_ERROR_MEMORY; } if (*status == 0) { for(int ii=0; ii < 
ndout; ii++) doutput[ii] = log(d[ii]); } free(d); }
GB_unaryop__identity_int32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_int32
// op(A') function:  GB_tran__identity_int32_int32

// C type:   int32_t
// A type:   int32_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

// NOTE: the macros below are consumed both by the loop in this file and by
// the template GB_unaryop_transpose.c included further down; their exact
// text is part of the generated kernel's behavior.

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (int32 to int32: a no-op cast)
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to all anz entries, statically scheduled
// over nthreads OpenMP threads. Returns GrB_NO_VALUE when this hard-coded
// kernel is compiled out (GB_DISABLE), so the caller falls back to the
// generic implementation.

GrB_Info GB_unop__identity_int32_int32
(
    int32_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the GB_unaryop_transpose.c template,
// which expands using the GB_* macros defined above (phase 2 of 2).

GrB_Info GB_tran__identity_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fill_nr_s8.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cvhf.h"
#include "nr_direct.h"
#include "optimizer.h"

#define MAX(I,J)        ((I) > (J) ? (I) : (J))

int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);
void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env);

/*
 * 8-fold symmetry, k>=l, k>=i>=j,
 *
 * For a fixed shell pair (ish,jsh), compute the integral blocks
 * (ksh lsh | jsh ish) for all ksh <= ish and lsh <= ksh, writing each
 * block into the dense buffer eri at offset (ao_loc[ksh]*nao +
 * ao_loc[lsh]) with stride nao2 between the di*dj (i,j) components.
 * Blocks rejected by fprescreen are explicitly zeroed instead of being
 * computed. The integral engine's scratch space ("cache") is placed
 * immediately after the di*dj*nao2 result region of eri.
 */
static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri,
                      int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
        const int *atm = envs->atm;
        const int *bas = envs->bas;
        const double *env = envs->env;
        const int natm = envs->natm;
        const int nbas = envs->nbas;
        const int *ao_loc = envs->ao_loc;
        const CINTOpt *cintopt = envs->cintopt;
        const int nao = ao_loc[nbas];
        const size_t nao2 = nao * nao;
        /* di, dj: number of AO functions in shells ish, jsh */
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        /* scratch area for intor, located past the result region of eri */
        double *cache = eri + di * dj * nao2;
        int dims[4] = {nao, nao, dj, di};
        int ksh, lsh, ij, k, l;
        int shls[4];
        double *peri;
        shls[2] = jsh;
        shls[3] = ish;
        for (ksh = 0; ksh <= ish; ksh++) {
        for (lsh = 0; lsh <= ksh; lsh++) {
                shls[0] = lsh;
                shls[1] = ksh;
                peri = eri + ao_loc[ksh] * nao + ao_loc[lsh];
                if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
                        (*intor)(peri, dims, shls, atm, natm, bas, nbas, env,
                                 cintopt, cache);
                } else {
                        /* screened out: zero the (ksh,lsh) sub-block for
                         * every (i,j) component so later packing reads
                         * well-defined values */
                        for (ij = 0; ij < di*dj; ij++) {
                                for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) {
                                for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) {
                                        peri[k*nao+l] = 0;
                                } }
                                peri += nao2;
                        }
                }
        } }
}

/*
 * Compute the integrals for shell pair (ish,jsh) into the dense buffer
 * buf (via fillnr_s8), then pack them into the 8-fold symmetric,
 * triangularly-packed array eri: entry (i0,j0,k,l) with i0>=j0 and kl
 * running over the lower triangle up to ij0 = i0*(i0+1)/2 + j0 is stored
 * at eri[ij0*(ij0+1)/2 + kl].
 */
static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh,
                     CVHFOpt *vhfopt, IntorEnvs *envs)
{
        const int nbas = envs->nbas;
        const int *ao_loc = envs->ao_loc;
        const int nao = ao_loc[nbas];
        const size_t nao2 = nao * nao;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int i, j, k, l, i0, j0, kl;
        size_t ij0;
        double *peri, *pbuf;

        fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs);
        for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) {
        for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) {
                /* only the lower triangle i0 >= j0 is stored */
                if (i0 >= j0) {
                        ij0 = i0*(i0+1)/2 + j0;
                        peri = eri + ij0*(ij0+1)/2;
                        pbuf = buf + nao2 * (i*dj+j);
                        /* full rows k < i0 of the (k,l) lower triangle */
                        for (kl = 0, k = 0; k < i0; k++) {
                        for (l = 0; l <= k; l++, kl++) {
                                peri[kl] = pbuf[k*nao+l];
                        } }
                        /* last row: k == i0, truncated at l == j0 */
                        for (l = 0; l <= j0; l++, kl++) {
                                peri[kl] = pbuf[k*nao+l];
                        }
                }
        } }
}

/*
 * Driver: fill the full 8-fold packed 2-electron integral array eri.
 * Shell pairs are enumerated by a single triangular index ij and
 * distributed dynamically over OpenMP threads; each thread owns a
 * private dense buffer (results + intor cache). A Schwarz-inequality
 * prescreen (CVHFnr_schwarz_cond) skips negligible blocks.
 * NOTE(review): the per-thread malloc result is not checked — on OOM
 * this would crash; confirm whether upstream policy is abort-on-OOM.
 */
void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nao = ao_loc[nbas];
        IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL,
                          cintopt, 1};
        CVHFOpt *vhfopt;
        CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc,
                         atm, natm, bas, nbas, env);
        vhfopt->fprescreen = CVHFnr_schwarz_cond;
        int shls_slice[] = {0, nbas};
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        int i, j, ij;
        /* per-thread workspace: di*di*nao*nao results + intor cache */
        double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size));
#pragma omp for nowait schedule(dynamic, 2)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* invert the triangular index: ij = i*(i+1)/2 + j, j <= i;
                 * 1e-7 guards against floating-point round-off in sqrt */
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - (i*(i+1)/2);
                store_ij(intor, eri, buf, i, j, vhfopt, &envs);
        }
        free(buf);
}
        CVHFdel_optimizer(&vhfopt);
}
dnnl_utils_avx512.h
//===- dnnl_utils_avx512.h ------------------------------------------------===// // // Copyright (C) 2019-2020 Alibaba Group Holding Limited. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ============================================================================= #include <immintrin.h> #include <omp.h> namespace dnnl_utils { static int calculat_offset(int len, int vec_size) { /* calculate the offset when using intrinsics. example: when len is 108 vec_size is 32 when using bf16 the result is 108 % 32 = 12 so we need to set the mask to 0b00000000000000000000111111111111 */ int offset = len; int expo = 0; int dst = 0; while (offset - vec_size > 0) { offset -= vec_size; } while (offset > 0) { dst += pow(2, expo); offset -= 1; expo += 1; } return dst; } #if defined(__GNUC__) && (__GNUC__ > 9) inline void binary_s32_func(dnnl::algorithm alg, int32_t* lhs, int32_t* rhs, int32_t* dst, int len) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; __m512i (*__mm512_binary_op)(__m512i, __m512i); switch (alg) { case dnnl::algorithm::binary_add: __mm512_binary_op = [](__m512i a, __m512i b) { return _mm512_add_epi32(a, b); }; break; case dnnl::algorithm::binary_mul: __mm512_binary_op = [](__m512i a, __m512i b) { return _mm512_mul_epi32(a, b); }; break; default: break; } for (; i <= len - vec_size; i += vec_size) { auto a1 = _mm512_loadu_epi32(lhs + i); auto b1 = _mm512_loadu_epi32(rhs + i); auto out1 = __mm512_binary_op(a1, b1); _mm512_mask_storeu_epi32(dst + i, 
mask16, out1); } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a1 = _mm512_maskz_loadu_epi32(tail_mask, lhs + i); auto b1 = _mm512_maskz_loadu_epi32(tail_mask, rhs + i); auto out1 = __mm512_binary_op(a1, b1); _mm512_mask_storeu_epi32(dst + i, tail_mask, out1); } } #else inline void binary_s32_func(dnnl::algorithm alg, int32_t* lhs, int32_t* rhs, int32_t* dst, int len) { assert(0); } #endif inline __m512 _mm512_cvtbf16f32_load(__mmask16 mask, void* mem_addr) { auto dst = _mm512_slli_epi32( _mm512_cvtepu16_epi32(_mm256_maskz_loadu_epi16(mask, mem_addr)), 0x10); return _mm512_castsi512_ps(dst); } inline void gather_func(char* params, int32_t* idx, size_t batch_size, size_t idx_size, size_t axis_size, size_t inner_size, size_t byte_size, char* dst) { size_t slice_bytes = inner_size * byte_size; #pragma omp parallel for for (int i = 0; i < batch_size; i++) { for (int j = 0; j < idx_size; j++) { int32_t curr_idx = idx[j]; memcpy(dst + (i * idx_size + j) * slice_bytes, params + (i * axis_size + curr_idx) * slice_bytes, slice_bytes); } } } #if defined(__GNUC__) && (__GNUC__ > 9) inline void floorbf_func(int len, int16_t* src, float* dst) { int i = 0; int vec_size = 512 / 16; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto a1 = _mm512_cvtbf16f32_load(mask16, src + i + 16); auto out0 = _mm512_floor_ps(a0); auto out1 = _mm512_floor_ps(a1); auto C_bf16 = _mm512_cvtne2ps_pbh(out1, out0); _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16)); } if ((len - i) > 16) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_floor_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16)); i += vec_size / 2; } if (len - i) { __mmask16 tail_mask = calculat_offset(i, vec_size); auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_floor_ps(a0); 
auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #elif defined(__GNUC__) && (__GNUC__ > 8) inline void floorbf_func(int len, int16_t* src, float* dst) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); auto tail_mask = calculat_offset(len, vec_size); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_floor_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i, _mm256_castsi256_ps(C_bf16)); } if (len - i) { auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_floor_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #else inline void floorbf_func(int len, int16_t* src, float* dst) { assert(0); } #endif inline void floorf_func(int len, float* src, float* dst) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; for (; i <= len - vec_size; i += vec_size) { auto a1 = _mm512_loadu_ps(src + i); auto out1 = _mm512_floor_ps(a1); _mm512_mask_storeu_ps(dst + i, mask16, out1); } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a1 = _mm512_maskz_loadu_ps(tail_mask, src + i); auto out1 = _mm512_floor_ps(a1); _mm512_mask_storeu_ps(dst + i, tail_mask, out1); } } inline void rsqrtf_func(int len, float* src, float* dst) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; for (; i <= len - vec_size; i += vec_size) { auto a1 = _mm512_loadu_ps(src + i); auto out1 = _mm512_rsqrt14_ps(a1); _mm512_mask_storeu_ps(dst + i, mask16, out1); } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a1 = _mm512_maskz_loadu_ps(tail_mask, src + i); auto out1 = _mm512_rsqrt14_ps(a1); _mm512_mask_storeu_ps(dst + i, tail_mask, out1); } } #if defined(__GNUC__) && (__GNUC__ > 9) inline void rsqrtbf_func(int len, int16_t* src, float* dst) { int i = 
0; int vec_size = 512 / 16; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto a1 = _mm512_cvtbf16f32_load(mask16, src + i + 16); auto out0 = _mm512_rsqrt14_ps(a0); auto out1 = _mm512_rsqrt14_ps(a1); auto C_bf16 = _mm512_cvtne2ps_pbh(out1, out0); _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16)); } if ((len - i) > 16) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16)); i += 16; } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #elif defined(__GNUC__) && (__GNUC__ > 8) inline void rsqrtbf_func(int len, int16_t* src, float* dst) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); auto tail_mask = calculat_offset(len, vec_size); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i, _mm256_castsi256_ps(C_bf16)); } if (len - i) { auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #else inline void rsqrtbf_func(int len, int16_t* src, float* dst) {} #endif #if defined(__GNUC__) && (__GNUC__ > 9) static inline __m512 pexp(const __m512& _x) { __m512 p16f_1 = _mm512_set1_ps(1.0f); __m512 p16f_half = _mm512_set1_ps(0.5f); __m512 p16f_127 = _mm512_set1_ps(127.f); __m512 p16f_exp_hi = _mm512_set1_ps(88.3762626647950f); __m512 p16f_exp_lo = 
_mm512_set1_ps(-88.3762626647949f); __m512 p16f_cephes_LOG2EF = _mm512_set1_ps(1.44269504088896341f); __m512 p16f_cephes_exp_p0 = _mm512_set1_ps(1.9875691500E-4f); __m512 p16f_cephes_exp_p1 = _mm512_set1_ps(1.3981999507E-3f); __m512 p16f_cephes_exp_p2 = _mm512_set1_ps(8.3334519073E-3f); __m512 p16f_cephes_exp_p3 = _mm512_set1_ps(4.1665795894E-2f); __m512 p16f_cephes_exp_p4 = _mm512_set1_ps(1.6666665459E-1f); __m512 p16f_cephes_exp_p5 = _mm512_set1_ps(5.0000001201E-1f); // Clamp x. __m512 x = _mm512_max_ps(_mm512_min_ps(_x, p16f_exp_hi), p16f_exp_lo); // Express exp(x) as exp(m*ln(2) + r), start by extracting // m = floor(x/ln(2) + 0.5). __m512 m = _mm512_floor_ps(_mm512_fmadd_ps(x, p16f_cephes_LOG2EF, p16f_half)); // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating // truncation errors. Note that we don't use the "pmadd" function here to // ensure that a precision-preserving FMA instruction is used. __m512 p16f_nln2 = _mm512_set1_ps(-0.6931471805599453f); __m512 r = _mm512_fmadd_ps(m, p16f_nln2, x); __m512 r2 = _mm512_mul_ps(r, r); // TODO(gonnet): Split into odd/even polynomials and try to exploit // instruction-level parallelism. __m512 y = p16f_cephes_exp_p0; y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p1); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p2); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p3); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p4); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p5); y = _mm512_fmadd_ps(y, r2, r); y = _mm512_add_ps(y, p16f_1); // Build emm0 = 2^m. __m512i emm0 = _mm512_cvttps_epi32(_mm512_add_ps(m, p16f_127)); emm0 = _mm512_slli_epi32(emm0, 23); // Return 2^m * exp(r). 
return _mm512_max_ps(_mm512_mul_ps(y, _mm512_castsi512_ps(emm0)), _x); }; static inline __m512 erf_avx512(const __m512& src512) { const __m512 coeff0 = _mm512_set1_ps(+7.853861353153693E-5); const __m512 coeff1 = _mm512_set1_ps(-8.010193625184903E-4); const __m512 coeff2 = _mm512_set1_ps(+5.188327685732524E-3); const __m512 coeff3 = _mm512_set1_ps(-2.685381193529856E-2); const __m512 coeff4 = _mm512_set1_ps(+1.128358514861418E-1); const __m512 coeff5 = _mm512_set1_ps(-3.761262582423300E-1); const __m512 coeff6 = _mm512_set1_ps(+1.128379165726710E+0); __m512 dst512; __m512 base512 = _mm512_mul_ps(src512, src512); dst512 = _mm512_fmadd_ps(coeff0, base512, coeff1); dst512 = _mm512_fmadd_ps(dst512, base512, coeff2); dst512 = _mm512_fmadd_ps(dst512, base512, coeff3); dst512 = _mm512_fmadd_ps(dst512, base512, coeff4); dst512 = _mm512_fmadd_ps(dst512, base512, coeff5); dst512 = _mm512_fmadd_ps(dst512, base512, coeff6); dst512 = _mm512_mul_ps(dst512, src512); return dst512; } static inline __m512 erfc_avx512(const __m512& src512) { const __m512 Pcoeff0 = _mm512_set1_ps(+2.326819970068386E-2); const __m512 Pcoeff1 = _mm512_set1_ps(-1.387039388740657E-1); const __m512 Pcoeff2 = _mm512_set1_ps(+3.687424674597105E-1); const __m512 Pcoeff3 = _mm512_set1_ps(-5.824733027278666E-1); const __m512 Pcoeff4 = _mm512_set1_ps(+6.210004621745983E-1); const __m512 Pcoeff5 = _mm512_set1_ps(-4.944515323274145E-1); const __m512 Pcoeff6 = _mm512_set1_ps(+3.404879937665872E-1); const __m512 Pcoeff7 = _mm512_set1_ps(-2.741127028184656E-1); const __m512 Pcoeff8 = _mm512_set1_ps(+5.638259427386472E-1); const __m512 Rcoeff0 = _mm512_set1_ps(-1.047766399936249E+1); const __m512 Rcoeff1 = _mm512_set1_ps(+1.297719955372516E+1); const __m512 Rcoeff2 = _mm512_set1_ps(-7.495518717768503E+0); const __m512 Rcoeff3 = _mm512_set1_ps(+2.921019019210786E+0); const __m512 Rcoeff4 = _mm512_set1_ps(-1.015265279202700E+0); const __m512 Rcoeff5 = _mm512_set1_ps(+4.218463358204948E-1); const __m512 Rcoeff6 = 
_mm512_set1_ps(-2.820767439740514E-1); const __m512 Rcoeff7 = _mm512_set1_ps(+5.641895067754075E-1); const __m512 one = _mm512_set1_ps(1.0); const __m512 two = _mm512_set1_ps(2.0); const __m512 zero = _mm512_set1_ps(0.0); const __m512 MinorMaxlog = _mm512_set1_ps(-88.72283905206835); __m512 abssrc = _mm512_abs_ps(src512); __m512 nabssrc = _mm512_sub_ps(zero, abssrc); __m512 v = _mm512_mul_ps(abssrc, nabssrc); __m512 z = pexp(v); __m512 q = _mm512_div_ps(one, abssrc); __m512 y = _mm512_mul_ps(q, q); __mmask16 PCoeff_mask = _mm512_cmp_ps_mask(abssrc, two, _CMP_LT_OQ); // < 2 __mmask16 RCoeff_mask = ~PCoeff_mask; __m512 pP; __m512 pR; if (PCoeff_mask) { pP = _mm512_fmadd_ps(Pcoeff0, y, Pcoeff1); pP = _mm512_fmadd_ps(pP, y, Pcoeff2); pP = _mm512_fmadd_ps(pP, y, Pcoeff3); pP = _mm512_fmadd_ps(pP, y, Pcoeff4); pP = _mm512_fmadd_ps(pP, y, Pcoeff5); pP = _mm512_fmadd_ps(pP, y, Pcoeff6); pP = _mm512_fmadd_ps(pP, y, Pcoeff7); pP = _mm512_fmadd_ps(pP, y, Pcoeff8); } if (RCoeff_mask) { pR = _mm512_fmadd_ps(Rcoeff0, y, Rcoeff1); pR = _mm512_fmadd_ps(pR, y, Rcoeff2); pR = _mm512_fmadd_ps(pR, y, Rcoeff3); pR = _mm512_fmadd_ps(pR, y, Rcoeff4); pR = _mm512_fmadd_ps(pR, y, Rcoeff5); pR = _mm512_fmadd_ps(pR, y, Rcoeff6); pR = _mm512_fmadd_ps(pR, y, Rcoeff7); } pP = _mm512_mask_mov_ps(pP, RCoeff_mask, pR); // y = z * q * p; // float y_clamp = z < -kMaxlog ? 0 : y; // return x < 0 ? 
2 - y_clamp : y_clamp; y = _mm512_mul_ps(z, q); y = _mm512_mul_ps(y, pP); __mmask16 y_clamp_mask = _mm512_cmp_ps_mask(z, MinorMaxlog, _CMP_LT_OQ); __m512 y_clamp = _mm512_mask_mov_ps(y, y_clamp_mask, zero); __mmask16 x_mask = _mm512_cmp_ps_mask(src512, zero, _CMP_LT_OQ); __m512 y_clamp2 = _mm512_sub_ps(two, y_clamp); y = _mm512_mask_mov_ps(y_clamp, x_mask, y_clamp2); y = _mm512_sub_ps(one, y); return y; } #endif template <typename T, typename Q> inline void splitter(const T& n, const Q& team, const Q& tid, T& n_start, T& n_end) { if (team <= 1 || n == 0) { n_start = 0; n_end = n; } else { T n1 = (n + (T)team - 1) / (T)team; T n2 = n1 - 1; T T1 = n - n2 * (T)team; n_end = (T)tid < T1 ? n1 : n2; n_start = (T)tid <= T1 ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2; } n_end += n_start; } template <typename T0, typename F> void for_1d(const int& ithr, const int& nthr, const T0& D0, const F& func) { T0 d0{0}, end{0}; splitter(D0, nthr, ithr, d0, end); for (; d0 < end; ++d0) func(d0); } template <typename T0, typename F> void parallel_for(const T0& D0, const F& func) { #pragma omp parallel for_1d(omp_get_thread_num(), omp_get_num_threads(), D0, func); } inline bool parallel_it_step() { return true; } template <typename Q, typename R, typename... Args> inline bool parallel_it_step(Q& x, const R& X, Args&&... tuple) { if (parallel_it_step(static_cast<Args>(tuple)...)) { x = (x + 1) % X; return x == 0; } return false; } template <typename T> inline T parallel_it_init(T start) { return start; } template <typename T, typename Q, typename R, typename... Args> inline T parallel_it_init(T start, Q& x, const R& X, Args&&... 
tuple) { start = parallel_it_init(start, static_cast<Args>(tuple)...); x = start % X; return start / X; } template <typename T0, typename T1, typename F> void for_2d(const int& ithr, const int& nthr, const T0& D0, const T1& D1, const F& func) { const size_t work_amount = (size_t)D0 * D1; if (work_amount == 0) return; size_t start{0}, end{0}; splitter(work_amount, nthr, ithr, start, end); T0 d0{0}; T1 d1{0}; parallel_it_init(start, d0, D0, d1, D1); for (size_t iwork = start; iwork < end; ++iwork) { func(d0, d1); parallel_it_step(d0, D0, d1, D1); } } template <typename T0, typename T1, typename F> void parallel_for2d(const T0& D0, const T1& D1, const F& func) { #pragma omp parallel for_2d(omp_get_thread_num(), omp_get_num_threads(), D0, D1, func); } const int block_size = 16; typedef __m512 vec_type_f; typedef __m512i vec_type_i; typedef __mmask16 vmask_type; using SizeVector = std::vector<int>; inline int count(SizeVector dims, int start_ind, int end_ind) { size_t count = 1; for (size_t i = start_ind; i < end_ind; i++) count *= dims[i]; return static_cast<int>(count); } inline int count(SizeVector dims, size_t start_ind = 0) { return count(dims, start_ind, dims.size()); } static inline void _mm_uni_storeu_ps(float* pdst, const __m512& vec) { _mm512_storeu_ps(pdst, vec); } static inline void _mm_uni_storeu_si(void* pdst, const __m512i vec) { _mm512_storeu_si512(pdst, vec); } static inline __mmask16 _mm_uni_cmpgt_i32(__m512i vec0, __m512i vec1) { return _mm512_cmp_epi32_mask(vec1, vec0, 1); } static inline __mmask16 _mm_uni_cmpgt_ps(__m512 vec0, __m512 vec1) { return _mm512_cmp_ps_mask(vec0, vec1, 14); } static inline __m512 _mm_uni_any_ps() { return __m512{}; } static inline __m512i _mm_uni_any_epi32() { return __m512i{}; } static inline __m512i _mm_uni_set1_epi32(int value) { return _mm512_mask_set1_epi32(_mm_uni_any_epi32(), (__mmask16)-1, value); } static inline __m512i _mm_uni_setzero_si() { return _mm512_setzero_si512(); } static inline __m512 
_mm_uni_blendv_ps(__m512 vec0, __m512 vec1, __m512 vmask) { return _mm512_mask_blend_ps( _mm512_cmpneq_epi32_mask(_mm512_castps_si512(vmask), _mm_uni_set1_epi32(0)), vec0, vec1); } static inline __m512 _mm_uni_blendv_ps(__m512 vec0, __m512 vec1, __mmask16 vmask) { return _mm512_mask_blend_ps(vmask, vec0, vec1); } struct cmpgt_ps { static inline vmask_type cmp_ps(const __m512 _Left, const __m512 _Right) { return _mm_uni_cmpgt_ps(_Left, _Right); } }; struct cmplt_ps { static inline vmask_type cmp_ps(const __m512 _Left, const __m512 _Right) { return _mm_uni_cmpgt_ps(_Right, _Left); } }; static inline __m512 _mm_uni_loadu_ps(const float* psrc) { return _mm512_mask_loadu_ps(_mm_uni_any_ps(), (__mmask16)-1, psrc); } template <class Compare1, template <typename> class Compare2> void top1_axis(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { int after_num = count(in_dims, axis + 1, in_dims.size()); int first_index = 0; parallel_for2d(before_num, after_num / block_size, [&](int i0, int ib1) { int s_index = i0 * dim * after_num + ib1 * block_size; vec_type_f vmax_val = _mm_uni_loadu_ps(src_data + s_index); vec_type_i vindex_max_val = _mm_uni_setzero_si(); for (int i2 = 1; i2 < dim; i2++) { s_index += after_num; vec_type_f vsrc = _mm_uni_loadu_ps(src_data + s_index); vmask_type vmask = Compare1::cmp_ps(vsrc, vmax_val); vmax_val = _mm_uni_blendv_ps(vmax_val, vsrc, vmask); vec_type_i vindex_cur_val = _mm_uni_set1_epi32(i2); vindex_max_val = _mm512_mask_blend_epi32(vmask, vindex_max_val, vindex_cur_val); } if (dst_data) _mm_uni_storeu_ps(dst_data + i0 * after_num + ib1 * block_size, vmax_val); if (dst_idx) _mm_uni_storeu_si(reinterpret_cast<vec_type_i*>(dst_idx + i0 * after_num + ib1 * block_size), vindex_max_val); }); first_index = after_num / block_size * block_size; int rest = after_num - first_index; parallel_for2d(before_num, rest, [&](int i0, int i1) { int 
index_max_val = 0; int s_index = i0 * dim * after_num + first_index + i1; float max_val = src_data[s_index]; for (int i2 = 1; i2 < dim; i2++) { s_index += after_num; if (Compare2<float>()(src_data[s_index], max_val)) { max_val = src_data[s_index]; index_max_val = i2; } } if (dst_data) dst_data[i0 * after_num + first_index + i1] = max_val; if (dst_idx) dst_idx[i0 * after_num + first_index + i1] = index_max_val; }); } template <template <typename> class Compare> void top1(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { parallel_for(before_num, [&](int i0) { int index_max_val = 0; int s_index = i0 * dim; float max_val = src_data[s_index]; for (int i1 = 1; i1 < dim; i1++) { s_index++; if (Compare<float>()(src_data[s_index], max_val)) { max_val = src_data[s_index]; index_max_val = i1; } } if (dst_data) dst_data[i0] = max_val; if (dst_idx) dst_idx[i0] = index_max_val; }); } template <class Compare1, template <typename> class Compare2> void topk_axis(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { int after_num = count(in_dims, axis + 1, in_dims.size()); int first_index = 0; if (src_k < count_vec) { parallel_for2d(before_num, after_num / block_size, [&](int i0, int ib1) { const int N = 32; vec_type_f vmax_values[N]; vec_type_i vmax_indexes[N]; vec_type_f vtmp; vec_type_i vtmp_indexes; vmask_type vmask; int s_index = i0 * dim * after_num + ib1 * block_size; auto vswap_func = [&](int index1, int index2) { vtmp = vmax_values[index1]; vmax_values[index1] = _mm_uni_blendv_ps(vmax_values[index1], vmax_values[index2], vmask); vmax_values[index2] = _mm_uni_blendv_ps(vmax_values[index2], vtmp, vmask); vtmp_indexes = vmax_indexes[index1]; vmax_indexes[index1] = _mm512_mask_blend_epi32( vmask, vmax_indexes[index1], vmax_indexes[index2]); vmax_indexes[index2] = 
_mm512_mask_blend_epi32(vmask, vmax_indexes[index2], vtmp_indexes); }; for (int i2 = 0; i2 < src_k; i2++) { vmax_values[i2] = _mm_uni_loadu_ps(src_data + s_index); vmax_indexes[i2] = _mm_uni_set1_epi32(i2); s_index += after_num; } for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { vmask = Compare1::cmp_ps(vmax_values[i3], vmax_values[i3 - 1]); if (vmask) vswap_func(i3, i3 - 1); } } for (int i2 = src_k; i2 < dim; i2++) { vmax_values[src_k] = _mm_uni_loadu_ps(src_data + s_index); vmax_indexes[src_k] = _mm_uni_set1_epi32(i2); for (int i3 = src_k; i3 > 0; i3--) { vmask = Compare1::cmp_ps(vmax_values[i3], vmax_values[i3 - 1]); if (vmask) vswap_func(i3, i3 - 1); else break; } s_index += after_num; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { vmask = _mm_uni_cmpgt_i32(vmax_indexes[i3 - 1], vmax_indexes[i3]); if (vmask) vswap_func(i3, i3 - 1); else break; } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) _mm_uni_storeu_ps( dst_data + (i0 * src_k + i2) * after_num + ib1 * block_size, vmax_values[i2]); } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) _mm_uni_storeu_si( reinterpret_cast<vec_type_i*>( dst_idx + (i0 * src_k + i2) * after_num + ib1 * block_size), vmax_indexes[i2]); } }); first_index = after_num / block_size * block_size; } int rest = after_num - first_index; parallel_for2d(before_num, rest, [&](int i0, int i1) { std::vector<float> max_values(src_k + 1); std::vector<int> max_indexes(src_k + 1); float tmp_value; int tmp_index; int s_index = i0 * dim * after_num + first_index + i1; auto swap_func = [&](int index1, int index2) { tmp_value = max_values[index1]; max_values[index1] = max_values[index2]; max_values[index2] = tmp_value; tmp_index = max_indexes[index1]; max_indexes[index1] = max_indexes[index2]; max_indexes[index2] = tmp_index; }; for (int i2 = 0; i2 < src_k; i2++) { max_values[i2] = src_data[s_index]; max_indexes[i2] = i2; s_index += after_num; } for (int 
i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (Compare2<float>()(max_values[i3], max_values[i3 - 1])) { swap_func(i3, i3 - 1); } } } for (int i2 = src_k; i2 < dim; i2++) { max_values[src_k] = src_data[s_index]; max_indexes[src_k] = i2; for (int i3 = src_k; i3 > 0; i3--) { if (Compare2<float>()(max_values[i3], max_values[i3 - 1])) swap_func(i3, i3 - 1); else break; } s_index += after_num; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (std::greater<int>()(max_indexes[i3 - 1], max_indexes[i3])) { swap_func(i3, i3 - 1); } } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) dst_data[i0 * src_k * after_num + i2 * after_num + first_index + i1] = max_values[i2]; } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) dst_idx[i0 * src_k * after_num + i2 * after_num + first_index + i1] = max_indexes[i2]; } }); } template <template <typename> class Compare> void topk(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { parallel_for(before_num, [&](int i0) { std::vector<float> max_values(src_k + 1); std::vector<int> max_indexes(src_k + 1); float tmp_value; int tmp_index; int s_index = i0 * dim; auto swap_func = [&](int index1, int index2) { tmp_value = max_values[index1]; max_values[index1] = max_values[index2]; max_values[index2] = tmp_value; tmp_index = max_indexes[index1]; max_indexes[index1] = max_indexes[index2]; max_indexes[index2] = tmp_index; }; for (int i2 = 0; i2 < src_k; i2++) { max_values[i2] = src_data[s_index]; max_indexes[i2] = i2; s_index++; } for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (Compare<float>()(max_values[i3], max_values[i3 - 1])) { swap_func(i3, i3 - 1); } } } for (int i2 = src_k; i2 < dim; i2++) { max_values[src_k] = src_data[s_index]; max_indexes[src_k] = i2; for (int i3 = src_k; i3 > 0; i3--) { if 
(Compare<float>()(max_values[i3], max_values[i3 - 1])) swap_func(i3, i3 - 1); else break; } s_index++; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (std::greater<int>()(max_indexes[i3 - 1], max_indexes[i3])) { swap_func(i3, i3 - 1); } } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) dst_data[i0 * src_k + i2] = max_values[i2]; } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) dst_idx[i0 * src_k + i2] = max_indexes[i2]; } }); } void topk_func(float* src, float* dst_data, int* dst_idx, std::vector<int32_t> src_dims, uint32_t K, bool largest, bool sorted, uint32_t axis) { auto in_dims = src_dims; size_t axis_dim; size_t axis_stride = 1; size_t axis_step = 1; int count_vec = 32; bool is_last_dim = false; int src_k = K; bool mode_max, sort_value; int dim, before_num; int axis_ = -1; if (axis_ < 0) axis_ += src_dims.size(); axis = static_cast<size_t>(axis_); if (largest) mode_max = true; else mode_max = false; if (sorted) sort_value = true; else sort_value = false; int j; for (j = src_dims.size() - 1; j >= 0; j--) { if (src_dims[j] != 1) break; } if (static_cast<size_t>(j) == axis) is_last_dim = true; for (size_t i = 0; i < axis; i++) { axis_step *= src_dims[i]; } axis_dim = src_dims[axis]; for (size_t i = (axis + 1); i < src_dims.size(); i++) { axis_stride *= src_dims[i]; } dim = static_cast<int>(src_dims[axis]); before_num = count(src_dims, 0, axis); if (src_k == 1) { if (is_last_dim) { if (mode_max) top1<std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else top1<std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else { if (mode_max) top1_axis<cmpgt_ps, std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else top1_axis<cmplt_ps, std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } } else { if (is_last_dim) 
{ if (mode_max) { topk<std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else topk<std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else { if (mode_max) topk_axis<cmpgt_ps, std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else topk_axis<cmplt_ps, std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } } } #if defined(__GNUC__) && (__GNUC__ > 9) static __m512 __mm512_fake_erf(__m512 src) { auto abssrc = _mm512_abs_ps(src); __mmask16 erf_mask = _mm512_cmp_ps_mask(abssrc, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __m512 dst512 = erf_avx512(src); __m512 dstc512 = erfc_avx512(src); return _mm512_mask_blend_ps(erf_mask, dstc512, dst512); } static void erf_func(float* src, float* dst, size_t len) { int i; for (i = 0; i + 16 <= len; i += 16) { __m512 src512 = _mm512_loadu_ps(src + i); __m512 abssrc = _mm512_abs_ps(src512); __mmask16 erf_mask = _mm512_cmp_ps_mask(abssrc, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __mmask16 erfc_mask = ~erf_mask; auto dst512 = __mm512_fake_erf(src512); _mm512_storeu_ps(dst + i, dst512); } int remain = len - i; if (remain) { __mmask16 mask = 0xffff; mask = mask >> (16 - remain); __m512 src512 = _mm512_maskz_loadu_ps(mask, src + i); __mmask16 erf_mask = _mm512_cmp_ps_mask(src512, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __mmask16 erfc_mask = ~erf_mask; auto dst512 = __mm512_fake_erf(src512); _mm512_mask_storeu_ps(dst + i, mask, dst512); // printf("erf_p remain...\n"); } return; } #else static void erf_func(float* src, float* dst, size_t len) { assert(0); } #endif #if defined(__GNUC__) && (__GNUC__ > 9) static void erf_bf16_func(int16_t* src, float* dst, size_t len) { int i = 0; int vec_size = 512 / 16; __mmask16 mask16 = 0xFFFF; for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto a1 = 
_mm512_cvtbf16f32_load(mask16, src + i + 16); auto erf_dst_a0 = __mm512_fake_erf(a0); auto erf_dst_a1 = __mm512_fake_erf(a1); auto C_bf16 = _mm512_cvtne2ps_pbh(erf_dst_a1, erf_dst_a0); _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16)); } if ((len - i) > 16) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = __mm512_fake_erf(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16)); i += 16; } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = __mm512_fake_erf(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } return; } #else static void erf_bf16_func(int16_t* src, float* dst, size_t len) { assert(0); } #endif // nms function related enum class boxEncoding { CORNER, CENTER }; struct filteredBoxes { float score; int class_index; int box_index; filteredBoxes() : score(0), class_index(0), box_index(0) {} filteredBoxes(float _score, int _class_index, int _box_index) : score(_score), class_index(_class_index), box_index(_box_index) {} }; struct Box { float score; int class_index; int box_index; Box() {} Box(float _score, int _class_index, int _box_index) : score(_score), class_index(_class_index), box_index(_box_index) {} }; void nms_func(float* boxes, float* scores, size_t batch_idx, size_t class_num, size_t num_boxes, size_t max_num_outputs, float score_threshold, float iou_threshold, int32_t* output_indices) { auto intersectionOverUnion = [](const float* boxesI, const float* boxesJ, boxEncoding boxEncodingType) { float yminI, xminI, ymaxI, xmaxI, yminJ, xminJ, ymaxJ, xmaxJ; if (boxEncodingType == boxEncoding::CENTER) { // box format: x_center, y_center, width, height yminI = boxesI[1] - boxesI[3] / 2.f; xminI = boxesI[0] - boxesI[2] / 2.f; ymaxI = boxesI[1] + boxesI[3] / 2.f; xmaxI = boxesI[0] + boxesI[2] / 2.f; yminJ = boxesJ[1] - 
boxesJ[3] / 2.f; xminJ = boxesJ[0] - boxesJ[2] / 2.f; ymaxJ = boxesJ[1] + boxesJ[3] / 2.f; xmaxJ = boxesJ[0] + boxesJ[2] / 2.f; } else { // box format: y1, x1, y2, x2 yminI = (std::min)(boxesI[0], boxesI[2]); xminI = (std::min)(boxesI[1], boxesI[3]); ymaxI = (std::max)(boxesI[0], boxesI[2]); xmaxI = (std::max)(boxesI[1], boxesI[3]); yminJ = (std::min)(boxesJ[0], boxesJ[2]); xminJ = (std::min)(boxesJ[1], boxesJ[3]); ymaxJ = (std::max)(boxesJ[0], boxesJ[2]); xmaxJ = (std::max)(boxesJ[1], boxesJ[3]); } float areaI = (ymaxI - yminI) * (xmaxI - xminI); float areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ); if (areaI <= 0.f || areaJ <= 0.f) return 0.f; float intersection_area = (std::max)((std::min)(ymaxI, ymaxJ) - (std::max)(yminI, yminJ), 0.f) * (std::max)((std::min)(xmaxI, xmaxJ) - (std::max)(xminI, xminJ), 0.f); return intersection_area / (areaI + areaJ - intersection_area); }; size_t numFiltBox; bool sort_result_descending = true; boxEncoding boxEncodingType = boxEncoding::CORNER; if (max_num_outputs == 0) { return; } std::vector<filteredBoxes> filtBoxes(num_boxes); std::vector<Box> sorted_boxes; for (int box_idx = 0; box_idx < num_boxes; box_idx++) { float* scores_ptr = scores + box_idx * class_num; int idx = std::max_element(scores_ptr, scores_ptr + class_num) - scores_ptr; float score = scores_ptr[idx]; if (score > score_threshold) { sorted_boxes.emplace_back(Box(score, idx, box_idx)); } } int io_selection_size = 0; if (sorted_boxes.size() > 0) { auto _compare = [](const Box l, const Box r) { return (l.score > r.score || ((l.score == r.score) && (l.box_index < r.box_index))); }; std::sort(sorted_boxes.begin(), sorted_boxes.end(), _compare); for (int i = 0; i < sorted_boxes.size(); i++) { auto score = sorted_boxes[i].score; auto idx = sorted_boxes[i].class_index; auto box_idx = sorted_boxes[i].box_index; } filtBoxes[0] = filteredBoxes(sorted_boxes[0].score, sorted_boxes[0].class_index, sorted_boxes[0].box_index); io_selection_size++; for (size_t box_idx = 1; (box_idx < 
sorted_boxes.size()) && (io_selection_size < max_num_outputs); box_idx++) { bool box_is_selected = true; for (int idx = io_selection_size - 1; idx >= 0; idx--) { float iou = intersectionOverUnion( &boxes[sorted_boxes[box_idx].box_index * 4], &boxes[filtBoxes[idx].box_index * 4], boxEncodingType); if (iou >= iou_threshold) { box_is_selected = false; break; } } if (box_is_selected) { filtBoxes[io_selection_size] = filteredBoxes( sorted_boxes[box_idx].score, sorted_boxes[box_idx].class_index, sorted_boxes[box_idx].box_index); io_selection_size++; } } } numFiltBox = io_selection_size; memset(output_indices, max_num_outputs * 3, 0); memset(output_indices, max_num_outputs, batch_idx); for (size_t idx = 0; idx < numFiltBox; idx++) { output_indices[max_num_outputs + 2 * idx] = filtBoxes[idx].class_index; output_indices[max_num_outputs + 2 * idx + 1] = filtBoxes[idx].box_index; } } } // namespace dnnl_utils
m_mesh_volume_surface_area.h
// // Created by Harold on 2020/10/30. // #ifndef M_MATH_M_MESH_VOLUME_SURFACE_AREA_H #define M_MATH_M_MESH_VOLUME_SURFACE_AREA_H #include <open3d/Open3D.h> #include <omp.h> namespace M_MATH { template<typename Scalar> inline Scalar SignedVolumeOfTriangle(Eigen::Matrix<Scalar, 3, 1> const& v1, Eigen::Matrix<Scalar, 3, 1> const& v2, Eigen::Matrix<Scalar, 3, 1> const& v3) { return v1.dot(v2.cross(v3)) / 6.0; } // require closed mesh with no intersecting/overlapping triangles template<typename Scalar> inline Scalar MeshVolume(std::vector<Eigen::Matrix<Scalar, 3, 1>> const& vertices, std::vector<Eigen::Vector3i> const& triangle_v_idx) { double volume = 0.; auto N = triangle_v_idx.size(); Eigen::Vector3i t; #pragma omp parallel for reduction(+:volume) private(t) for (auto i = 0; i < N; i++) { t = triangle_v_idx[i]; volume += SignedVolumeOfTriangle(vertices[t.x()], vertices[t.y()], vertices[t.z()]); } return std::abs(volume); } template<typename Scalar> inline Scalar AreaOfTriangle(Eigen::Matrix<Scalar, 3, 1> const& v1, Eigen::Matrix<Scalar, 3, 1> const& v2, Eigen::Matrix<Scalar, 3, 1> const& v3) { return 0.5 * (Eigen::Matrix<Scalar, 3, 1>(v1.x() - v2.x(), v1.y() - v2.y(), v1.z() - v2.z()) .cross(Eigen::Matrix<Scalar, 3, 1>(v1.x() - v3.x(), v1.y() - v3.y(), v1.z() - v3.z()))).norm(); } // require closed mesh with no intersecting/overlapping triangles template<typename Scalar> inline Scalar MeshSurface(std::vector<Eigen::Matrix<Scalar, 3, 1>> const& vertices, std::vector<Eigen::Vector3i> const& triangle_v_idx) { double area = 0.; auto N = triangle_v_idx.size(); Eigen::Vector3i t; #pragma omp parallel for reduction(+:area) private(t) for (auto i = 0; i < N; i++) { t = triangle_v_idx[i]; area += AreaOfTriangle(vertices[t.x()], vertices[t.y()], vertices[t.z()]); } return area; } // truncated triangular prism: V = 1/3 * A * (h1 + h2 + h3) template<typename Scalar> inline Scalar TriangularPrismVolume(Eigen::Matrix<Scalar, 3, 1> const& v1, Eigen::Matrix<Scalar, 3, 1> const& v2, 
Eigen::Matrix<Scalar, 3, 1> const& v3, Eigen::Matrix<Scalar, 3, 1> const& plane_center, Eigen::Matrix<Scalar, 3, 1> const& plane_normal) { auto pn = plane_normal.normalized(); // project vertices onto plane auto v1p = Eigen::Matrix<Scalar, 3, 1>(v1 - (v1 - plane_center).dot(plane_normal) * pn); auto v2p = Eigen::Matrix<Scalar, 3, 1>(v2 - (v2 - plane_center).dot(plane_normal) * pn); auto v3p = Eigen::Matrix<Scalar, 3, 1>(v3 - (v3 - plane_center).dot(plane_normal) * pn); auto A = AreaOfTriangle(v1p, v2p, v3p); auto h1 = (v1p - v1).norm(); auto h2 = (v2p - v2).norm(); auto h3 = (v3p - v3).norm(); return 1. / 3. * A * (h1 + h2 + h3); } // triangle mesh volume against certain plane (project triangle to plane and compute truncated triangular prism) template<typename Scalar> inline Scalar MeshVolume(std::vector<Eigen::Matrix<Scalar, 3, 1>> const& vertices, std::vector<Eigen::Vector3i> const& triangle_v_idx, Eigen::Matrix<Scalar, 3, 1> const& plane_center, Eigen::Matrix<Scalar, 3, 1> const& plane_normal) { double volume = 0.; auto N = triangle_v_idx.size(); Eigen::Vector3i t; #pragma omp parallel for reduction(+:volume) private(t) for (auto i = 0; i < N; i++) { t = triangle_v_idx[i]; volume += TriangularPrismVolume(vertices[t.x()], vertices[t.y()], vertices[t.z()], plane_center, plane_normal); } return cv::abs(volume); } } #endif //M_MATH_M_MESH_VOLUME_SURFACE_AREA_H
interaction.c
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */

/* This file is part of phonopy. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include "interaction.h"

#include <stdio.h>
#include <stdlib.h>

#include "bzgrid.h"
#include "imag_self_energy_with_g.h"
#include "lapack_wrapper.h"
#include "phonoc_array.h"
#include "real_to_reciprocal.h"
#include "reciprocal_to_normal.h"

/* The six permutations of a q-point triplet used when symmetrizing */
/* fc3 in reciprocal space. */
static const long index_exchange[6][3] = {{0, 1, 2}, {2, 0, 1}, {1, 2, 0},
                                          {2, 1, 0}, {0, 2, 1}, {1, 0, 2}};

static void real_to_normal(
    double *fc3_normal_squared, const long (*g_pos)[4], const long num_g_pos,
    const double *freqs0, const double *freqs1, const double *freqs2,
    const lapack_complex_double *eigvecs0,
    const lapack_complex_double *eigvecs1,
    const lapack_complex_double *eigvecs2, const double *fc3,
    const long is_compact_fc3, const double q_vecs[3][3], /* q0, q1, q2 */
    const double (*svecs)[3], const long multi_dims[2],
    const long (*multiplicity)[2], const double *masses, const long *p2s_map,
    const long *s2p_map, const long *band_indices, const long num_band,
    const double cutoff_frequency, const long triplet_index,
    const long num_triplets, const long openmp_at_bands);
static void real_to_normal_sym_q(
    double *fc3_normal_squared, const long (*g_pos)[4], const long num_g_pos,
    double *const freqs[3], lapack_complex_double *const eigvecs[3],
    const double *fc3, const long is_compact_fc3,
    const double q_vecs[3][3], /* q0, q1, q2 */
    const double (*svecs)[3], const long multi_dims[2],
    const long (*multiplicity)[2], const double *masses, const long *p2s_map,
    const long *s2p_map, const long *band_indices, const long num_band0,
    const long num_band, const double cutoff_frequency,
    const long triplet_index, const long num_triplets,
    const long openmp_at_bands);

/* fc3_normal_squared[num_triplets, num_band0, num_band, num_band] */
/* Compute squared ph-ph interaction strengths for all q-point triplets. */
/* Parallelization is over triplets when there are more triplets than */
/* bands, otherwise it is left to the band loops inside each triplet */
/* (flag 1 - openmp_per_triplets passed down as openmp_at_bands). */
void itr_get_interaction(Darray *fc3_normal_squared, const char *g_zero,
                         const Darray *frequencies,
                         const lapack_complex_double *eigenvectors,
                         const long (*triplets)[3], const long num_triplets,
                         const ConstBZGrid *bzgrid, const double *fc3,
                         const long is_compact_fc3, const double (*svecs)[3],
                         const long multi_dims[2],
                         const long (*multiplicity)[2], const double *masses,
                         const long *p2s_map, const long *s2p_map,
                         const long *band_indices, const long symmetrize_fc3_q,
                         const double cutoff_frequency) {
    long openmp_per_triplets;
    long(*g_pos)[4];
    long i;
    long num_band, num_band0, num_band_prod, num_g_pos;

    g_pos = NULL;

    num_band0 = fc3_normal_squared->dims[1];
    num_band = frequencies->dims[1];
    num_band_prod = num_band0 * num_band * num_band;

    if (num_triplets > num_band) {
        openmp_per_triplets = 1;
    } else {
        openmp_per_triplets = 0;
    }

#ifdef PHPYOPENMP
#pragma omp parallel for schedule(guided) private( \
    num_g_pos, g_pos) if (openmp_per_triplets)
#endif
    for (i = 0; i < num_triplets; i++) {
        /* Allocated per iteration so each OpenMP thread owns its buffer. */
        g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);
        num_g_pos = ise_set_g_pos(g_pos, num_band0, num_band,
                                  g_zero + i * num_band_prod);

        itr_get_interaction_at_triplet(
            fc3_normal_squared->data + i * num_band_prod, num_band0, num_band,
            g_pos, num_g_pos, frequencies->data, eigenvectors, triplets[i],
            bzgrid, fc3, is_compact_fc3, svecs, multi_dims, multiplicity,
            masses, p2s_map, s2p_map, band_indices, symmetrize_fc3_q,
            cutoff_frequency, i, num_triplets, 1 - openmp_per_triplets);

        free(g_pos);
        g_pos = NULL;
    }
}

/* Compute the interaction for one q-point triplet.  q-vectors are */
/* reconstructed from the BZ grid addresses, then either the permutation- */
/* symmetrized (symmetrize_fc3_q) or the direct path is taken. */
void itr_get_interaction_at_triplet(
    double *fc3_normal_squared, const long num_band0, const long num_band,
    const long (*g_pos)[4], const long num_g_pos, const double *frequencies,
    const lapack_complex_double *eigenvectors, const long triplet[3],
    const ConstBZGrid *bzgrid, const double *fc3, const long is_compact_fc3,
    const double (*svecs)[3], const long multi_dims[2],
    const long (*multiplicity)[2], const double *masses, const long *p2s_map,
    const long *s2p_map, const long *band_indices,
    const long symmetrize_fc3_q, const double cutoff_frequency,
    const long triplet_index, /* only for print */
    const long num_triplets,  /* only for print */
    const long openmp_at_bands) {
    long j, k;
    double *freqs[3];
    lapack_complex_double *eigvecs[3];
    double q_vecs[3][3];

    /* Fractional q in the grid basis, then transformed by Q. */
    for (j = 0; j < 3; j++) {
        for (k = 0; k < 3; k++) {
            q_vecs[j][k] =
                ((double)bzgrid->addresses[triplet[j]][k]) / bzgrid->D_diag[k];
        }
        bzg_multiply_matrix_vector_ld3(q_vecs[j], bzgrid->Q, q_vecs[j]);
    }

    if (symmetrize_fc3_q) {
        /* Copy frequencies and eigenvectors of the three q-points so that */
        /* the permutation loop can index them uniformly. */
        for (j = 0; j < 3; j++) {
            freqs[j] = (double *)malloc(sizeof(double) * num_band);
            eigvecs[j] = (lapack_complex_double *)malloc(
                sizeof(lapack_complex_double) * num_band * num_band);
            for (k = 0; k < num_band; k++) {
                freqs[j][k] = frequencies[triplet[j] * num_band + k];
            }
            for (k = 0; k < num_band * num_band; k++) {
                eigvecs[j][k] =
                    eigenvectors[triplet[j] * num_band * num_band + k];
            }
        }
        real_to_normal_sym_q(
            fc3_normal_squared, g_pos, num_g_pos, freqs, eigvecs, fc3,
            is_compact_fc3, q_vecs, /* q0, q1, q2 */
            svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map,
            band_indices, num_band0, num_band, cutoff_frequency,
            triplet_index, num_triplets, openmp_at_bands);
        for (j = 0; j < 3; j++) {
            free(freqs[j]);
            freqs[j] = NULL;
            free(eigvecs[j]);
            eigvecs[j] = NULL;
        }
    } else {
        real_to_normal(fc3_normal_squared, g_pos, num_g_pos,
                       frequencies + triplet[0] * num_band,
                       frequencies + triplet[1] * num_band,
                       frequencies + triplet[2] * num_band,
                       eigenvectors + triplet[0] * num_band * num_band,
                       eigenvectors + triplet[1] * num_band * num_band,
                       eigenvectors + triplet[2] * num_band * num_band, fc3,
                       is_compact_fc3, q_vecs, /* q0, q1, q2 */
                       svecs, multi_dims, multiplicity, masses, p2s_map,
                       s2p_map, band_indices, num_band, cutoff_frequency,
                       triplet_index, num_triplets, openmp_at_bands);
    }
}

/* fc3 (real space) -> fc3 (reciprocal) -> squared normal-mode matrix */
/* elements, for one triplet. */
static void real_to_normal(
    double *fc3_normal_squared, const long (*g_pos)[4], const long num_g_pos,
    const double *freqs0, const double *freqs1, const double *freqs2,
    const lapack_complex_double *eigvecs0,
    const lapack_complex_double *eigvecs1,
    const lapack_complex_double *eigvecs2, const double *fc3,
    const long is_compact_fc3, const double q_vecs[3][3], /* q0, q1, q2 */
    const double (*svecs)[3], const long multi_dims[2],
    const long (*multiplicity)[2], const double *masses, const long *p2s_map,
    const long *s2p_map, const long *band_indices, const long num_band,
    const double cutoff_frequency, const long triplet_index,
    const long num_triplets, const long openmp_at_bands) {
    lapack_complex_double *fc3_reciprocal;

    fc3_reciprocal = (lapack_complex_double *)malloc(
        sizeof(lapack_complex_double) * num_band * num_band * num_band);

    r2r_real_to_reciprocal(fc3_reciprocal, q_vecs, fc3, is_compact_fc3, svecs,
                           multi_dims, multiplicity, p2s_map, s2p_map,
                           openmp_at_bands);

#ifdef MEASURE_R2N
    if (openmp_at_bands && num_triplets > 0) {
        /* FIX: the original printf used %d for long arguments (undefined */
        /* behavior) and referenced num_band0, which is not declared in   */
        /* this function; num_band is the band count available here.      */
        printf("At triplet %ld/%ld (# of bands=%ld):\n", triplet_index,
               num_triplets, num_band);
    }
#endif
    reciprocal_to_normal_squared(
        fc3_normal_squared, g_pos, num_g_pos, fc3_reciprocal, freqs0, freqs1,
        freqs2, eigvecs0, eigvecs1, eigvecs2, masses, band_indices, num_band,
        cutoff_frequency, openmp_at_bands);

    free(fc3_reciprocal);
    fc3_reciprocal = NULL;
}

/* Average real_to_normal over the six index permutations of the triplet */
/* (index_exchange), accumulating each permuted result divided by 6. */
static void real_to_normal_sym_q(
    double *fc3_normal_squared, const long (*g_pos)[4], const long num_g_pos,
    double *const freqs[3], lapack_complex_double *const eigvecs[3],
    const double *fc3, const long is_compact_fc3,
    const double q_vecs[3][3], /* q0, q1, q2 */
    const double (*svecs)[3], const long multi_dims[2],
    const long (*multiplicity)[2], const double *masses, const long *p2s_map,
    const long *s2p_map, const long *band_indices, const long num_band0,
    const long num_band, const double cutoff_frequency,
    const long triplet_index, const long num_triplets,
    const long openmp_at_bands) {
    long i, j, k, l;
    long band_ex[3];
    double q_vecs_ex[3][3];
    double *fc3_normal_squared_ex;

    fc3_normal_squared_ex =
        (double *)malloc(sizeof(double) * num_band * num_band * num_band);

    for (i = 0; i < num_band0 * num_band * num_band; i++) {
        fc3_normal_squared[i] = 0;
    }

    for (i = 0; i < 6; i++) {
        for (j = 0; j < 3; j++) {
            for (k = 0; k < 3; k++) {
                q_vecs_ex[j][k] = q_vecs[index_exchange[i][j]][k];
            }
        }
        real_to_normal(
            fc3_normal_squared_ex, g_pos, num_g_pos,
            freqs[index_exchange[i][0]], freqs[index_exchange[i][1]],
            freqs[index_exchange[i][2]], eigvecs[index_exchange[i][0]],
            eigvecs[index_exchange[i][1]], eigvecs[index_exchange[i][2]], fc3,
            is_compact_fc3, q_vecs_ex, /* q0, q1, q2 */
            svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map,
            band_indices, num_band, cutoff_frequency, triplet_index,
            num_triplets, openmp_at_bands);
        for (j = 0; j < num_band0; j++) {
            for (k = 0; k < num_band; k++) {
                for (l = 0; l < num_band; l++) {
                    band_ex[0] = band_indices[j];
                    band_ex[1] = k;
                    band_ex[2] = l;
                    fc3_normal_squared[j * num_band * num_band +
                                       k * num_band + l] +=
                        fc3_normal_squared_ex[band_ex[index_exchange[i][0]] *
                                                  num_band * num_band +
                                              band_ex[index_exchange[i][1]] *
                                                  num_band +
                                              band_ex[index_exchange[i][2]]] /
                        6;
                }
            }
        }
    }

    free(fc3_normal_squared_ex);
}
ecryptfs_fmt_plug.c
/* Cracker for eCryptfs ~/.ecryptfs/wrapped-passphrase. * * We attack "login passphrase" instead of "mount passphrase" (and which could * be 128-bit random key!). * * "ecryptfs_unwrap_passphrase -> generate_passphrase_sig" in * src/libecryptfs/key_management.c is important. * * Do we need to do full decryption as done in "ecryptfs_unwrap_passphrase"? * I believe, 8 bytes of verification data ought to be enough for anybody! * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ecryptfs1; #elif FMT_REGISTERS_H john_register_one(&fmt_ecryptfs1); #else #include <string.h> #include <errno.h> #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64_convert.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // XXX #endif #endif #include "memdbg.h" //#undef SIMD_COEF_64 #define FORMAT_TAG "$ecryptfs$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define FORMAT_LABEL "eCryptfs" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT " (65536x)" // good luck with that! 
#define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define REAL_BINARY_SIZE 8 #define HEX_BINARY_SIZE (REAL_BINARY_SIZE*2) #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #if ARCH_LITTLE_ENDIAN==1 #define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 *8 ) #else #define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + ((i)&7) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 *8 ) #endif #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif /* taken from eCryptfs */ #define ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS 65536 #define ECRYPTFS_SALT_SIZE 8 #define ECRYPTFS_DEFAULT_SALT "\x00\x11\x22\x33\x44\x55\x66\x77" #define ECRYPTFS_SIG_SIZE 8 static struct fmt_tests ecryptfs_tests[] = { /* hash ==> first 16 bytes of ~/.ecryptfs/wrapped-passphrase */ {"$ecryptfs$0$92dc3db8feaf1676", "openwall"}, {"$ecryptfs$0$ccb515ee115be591", "failpassword"}, {"$ecryptfs$0$8acb10b9e061fcc7", "verylongbutstillfailpassword"}, /* fake hash to test custom salt handling */ {"$ecryptfs$0$1$0000000000000000$884ed410cd143bca", "fake"}, {"$ecryptfs$0$1$544c39674737716a$a8307a01b2d1b008", "fake"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; // really really unused (even in the original code) int salt_length; unsigned char salt[ECRYPTFS_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = 
mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; int extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; p = ciphertext + FORMAT_TAG_LENGTH; if (*p != '0' || *(p + 1) != '$') return 0; p += 2; if (*p == '1' && *(p + 1) == '$') { // handle salted variety p += 2; if (hexlenl(p, 0) != HEX_BINARY_SIZE || p[HEX_BINARY_SIZE] != '$') return 0; p += (HEX_BINARY_SIZE+1); } return hexlenl(p, &extra) == HEX_BINARY_SIZE && !extra; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; int i; char *p, *q; memset(&cs, 0, SALT_SIZE); p = ciphertext + FORMAT_TAG_LENGTH; p = p + 2; // skip over "0$" /* support for custom salt */ if (*p == '1' && *(p + 1) == '$') { p = p + 2; q = strchr(p, '$'); cs.salt_length = (q - p) / 2; for (i = 0; i < cs.salt_length; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else { memcpy(cs.salt, ECRYPTFS_DEFAULT_SALT, ECRYPTFS_SALT_SIZE); } return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[REAL_BINARY_SIZE]; uint64_t dummy; } buf; unsigned char *out = buf.c; int i; char *p = strrchr(ciphertext, '$') + 1; for (i = 0; i < REAL_BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #if defined(SIMD_COEF_64) && !ARCH_LITTLE_ENDIAN alter_endianity_w64(out, REAL_BINARY_SIZE>>5); #endif return out; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < 
count; index += MAX_KEYS_PER_CRYPT) { int j; SHA512_CTX ctx; #ifdef SIMD_COEF_64 unsigned char tmpBuf[64]; unsigned int i; unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys; uint64_t *keys64; keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE); keys64 = (uint64_t*)keys; memset(keys, 0, 128*MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA512_Init(&ctx); SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE); SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i])); SHA512_Final((unsigned char *)tmpBuf, &ctx); for (j = 0; j < 64; ++j) keys[GETPOS_512(j, i)] = tmpBuf[j]; keys[GETPOS_512(j, i)] = 0x80; // 64 bytes of crypt data (0x200 bits). keys[GETPOS_512(126, i)] = 0x02; } for (j = 1; j < ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); // Last one with FLAT_OUT SIMDSHA512body(keys, (uint64_t*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT); #else SHA512_Init(&ctx); SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE); SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA512_Final((unsigned char *)crypt_out[index], &ctx); /* now "h" (crypt_out[index] becomes our input, total SHA-512 calls => 65536 */ for (j = 1; j <= ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); SHA512_Final((unsigned char *)crypt_out[index], &ctx); } #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], REAL_BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], REAL_BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void ecryptfs_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char 
*get_key(int index) { return saved_key[index]; } struct fmt_main fmt_ecryptfs1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, REAL_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, ecryptfs_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, ecryptfs_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
hsrp_fmt_plug.c
/* * Cracker for MD5 authentication in HSRP, HSRPv2, VRRP, and GLBP. * http://www.rfc-editor.org/rfc/rfc1828.txt * * This is dedicated to Darya. You inspire me. * * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * optimized Feb 2016, JimF. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hsrp; #elif FMT_REGISTERS_H john_register_one(&fmt_hsrp); #else #include <string.h> #ifdef _OPENMP #include <omp.h> // OMP_SCALE tuned on core i7 4-core HT // 2048 - 8850k 6679k // 4096 - 10642k 7278k // 8192 - 10489k 7532k // 16k - 10413k 7694k // 32k - 12111k 7803k ** this value chosen // 64k - 12420k 6523k // 128k - 12220k 6741k #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 8192 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 32768 #endif #endif #endif #include "arch.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "params.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "hsrp" #define FORMAT_NAME "\"MD5 authentication\" HSRP, HSRPv2, VRRP, GLBP" #define FORMAT_TAG "$hsrp$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 55 // Must fit in a single MD5 block #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_SIZE sizeof(struct custom_salt) #define REAL_SALT_SIZE 50 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"}, 
	/* Tail of the tests[] self-test vector array (the array's opening
	 * bracket lies above this chunk): HSRPv1 pairs first, then HSRPv2.
	 * Each entry is "$hsrp$<hex salt>$<hex md5>" plus its plaintext. */
	{"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
	{"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
	/* HSRPv2 hashes */
	{"$hsrp$0128020006040001aabbcc000a000000006400000bb8000027100a000064000000000000000000000000041c010000000a00000a0000000000000000000000000000000000000000$642fedafe1f374bd2fdd8f1ba81d87a2", "password"},
	{"$hsrp$0128020006040001aabbcc001400000000c800000bb8000027100a000064000000000000000000000000041c010000000a0000140000000000000000000000000000000000000000$0481257f0fe583b275f03a48e88de72f", "password12345"},
	{NULL}
};

/* Per-candidate key buffers; each is exactly one 64-byte MD5 input limb so
 * the MD5 padding/length trick in crypt_all() can be done in-place. */
static char (*saved_key)[64]; // 1 full limb of MD5, we do out work IN this buffer.
/* Per-candidate MD5 state after hashing key||padding (cached across salts). */
static MD5_CTX (*saved_ctx);
/* saved_len[i] = strlen of key i; dirty != 0 means saved_ctx needs rebuilding. */
static int *saved_len, dirty;
/* Per-candidate binary output of the last crypt_all(). */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Current salt: raw HSRP packet bytes decoded from the hex in the tag. */
static struct custom_salt {
	int length;
	unsigned char salt[2048]; // be safe ;)
} *cur_salt;

/* Allocate per-candidate buffers; scale key counts by thread count under OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_num_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_ctx));
}

/* Release everything allocated by init() (reverse order). */
static void done(void)
{
	MEM_FREE(saved_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Syntax check of a ciphertext line:
 *  - optional "$hsrp$" tag, then lowercase-hex salt, '$', lowercase-hex digest;
 *  - digest must be exactly BINARY_SIZE*2 hex chars and end the string;
 *  - salt must fit in cur_salt->salt.
 * Returns 1 when valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q || q + 1 == p)
		return 0;
	q = q + 1;
	// if ((q - p - 1) > REAL_SALT_SIZE * 2)
	//	return 0;
	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;
	if (q - p > (sizeof(cur_salt->salt) - 1) * 2)
		return 0;
	return 1;
}

/* Decode the hex salt portion into a static custom_salt and return it.
 * NOTE(review): returns a pointer to static storage, as is conventional
 * for JtR get_salt() implementations. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i, len;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	cs.length = len;
	return &cs;
}

/* Decode the trailing hex digest into a static, aligned binary buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy; /* forces alignment for word-wise compares */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Standard JtR hash-table bucket functions over the first output word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Select the salt produced by get_salt() for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// this place would normally contain "print_hex" but I do not want to piss of magnum (yet again)

#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)

/* Compute MD5(key || padding || salt || key) for each candidate.
 * When keys changed (dirty), the first 64-byte limb — key plus the 0x80
 * padding bit and the bit-length in word 14 — is hashed once and the MD5
 * state cached in saved_ctx, so salt changes reuse it for free. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		int len = saved_len[index];

		if (dirty) {
			// we use the saved_key buffer in-line.
			unsigned int *block = (unsigned int*)saved_key[index];
			MD5_Init(&saved_ctx[index]);
			// set bit
			saved_key[index][len] = 0x80;
			block[14] = len << 3; /* message length in bits, MD5 layout */
#if (ARCH_LITTLE_ENDIAN==0)
			block[14] = JOHNSWAP(block[14]);
#endif
			MD5_Update(&saved_ctx[index], (unsigned char*)block, 64);
			// clear the bit, so that get_key returns proper key.
			saved_key[index][len] = 0;
		}
		memcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));
		// data
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		// key (again)
		MD5_Update(&ctx, saved_key[index], len);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	dirty = 0;
	return count;
}

/* Quick scan: does any candidate's first output word match the binary?
 * NOTE(review): without _OPENMP only index 0 is checked — presumably
 * keys-per-crypt is 1 in that build; confirm against the format params. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

/* Full binary comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* No further verification needed beyond cmp_one(). */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate key; zero out any leftover tail from a longer previous
 * key (the whole 64-byte limb is hashed) and mark the cached MD5 state stale. */
static void hsrp_set_key(char *key, int index)
{
	int olen = saved_len[index];
	int len = strlen(key);

	saved_len[index] = len;
	strcpy(saved_key[index], key);
	if (olen > len)
		memset(&(saved_key[index][len]), 0, olen - len);
	dirty = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration structure consumed by the JtR core. */
struct fmt_main fmt_hsrp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		hsrp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
/* ==== file: 3d7pt.lbpar.c ==== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(24*t3+Nx+20,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),32*t4+30),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel 
{ LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* ==== file: par_strength.c ==== */
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix".  This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric.  This is because of the directional nature of the
  "strengh of dependence" notion (see below).  Since we are using
  nonsymmetric storage for A right now, this is not a problem.  If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix.  We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A.  To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength".  We also retain the "natural"
  graph notation for representing the directed graph of a matrix.
  That is, the nonzero entry $a_ij$ is represented as: i --> j.  In
  the strength matrix, S, the entry $s_ij$ is also graphically denoted
  as above, and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_ij$
  \item $j$ "influences" $i$ with "strength" $s_ij$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.

  @param A [IN]
  coefficient matrix
  @param strength_threshold [IN]
  threshold parameter used to define strength
  @param max_row_sum [IN]
  parameter used to modify definition of strength for diagonal dominant matrices
  @param S_ptr [OUT]
  strength matrix

  @see */
/*--------------------------------------------------------------------------*/

/* Host (CPU, optionally OpenMP-threaded) implementation of strength-matrix
 * construction.  Builds S with the same row partitioning as A; S has no
 * diagonal entries. */
HYPRE_Int
hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix    *A,
                           HYPRE_Real             strength_threshold,
                           HYPRE_Real             max_row_sum,
                           HYPRE_Int              num_functions,
                           HYPRE_Int             *dof_func,
                           hypre_ParCSRMatrix   **S_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                 comm            = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;
   hypre_CSRMatrix         *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int               *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Real              *A_diag_data     = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix         *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int               *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Real              *A_offd_data = NULL;
   HYPRE_Int               *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int               *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt            *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int                num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt             global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int                num_nonzeros_diag;
   HYPRE_Int                num_nonzeros_offd = 0;
   HYPRE_Int                num_cols_offd = 0;
   hypre_ParCSRMatrix      *S;
   hypre_CSRMatrix         *S_diag;
   HYPRE_Int               *S_diag_i;
   HYPRE_Int               *S_diag_j;
   /* HYPRE_Real           *S_diag_data; */
   hypre_CSRMatrix         *S_offd;
   HYPRE_Int               *S_offd_i = NULL;
   HYPRE_Int               *S_offd_j = NULL;
   /* HYPRE_Real           *S_offd_data; */
   HYPRE_Real               diag, row_scale, row_sum;
   HYPRE_Int                i, jA, jS;
   HYPRE_Int                ierr = 0;
   HYPRE_Int               *dof_func_offd;
   HYPRE_Int                num_sends;
   HYPRE_Int               *int_buf_data;
   HYPRE_Int                index, start, j;
   HYPRE_Int               *prefix_sum_workspace;
   HYPRE_MemoryLocation     memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /*--------------------------------------------------------------
    * Compute a  ParCSR  strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   /* S mirrors A's parallel layout and (initially) its nonzero counts. */
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);

   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, memory_location);
   /* Temporary J array on the host; the compressed J is built separately below. */
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, memory_location);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, memory_location);
   HYPRE_Int *S_temp_offd_j = NULL;

   dof_func_offd = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_temp_offd_j = hypre_CSRMatrixJ(S_offd);

      HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }

      S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, memory_location);

      /* S shares A's off-diagonal column map (copied, not aliased). */
      HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_offd; i++)
      {
         col_map_offd_S[i] = col_map_offd_A[i];
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2 * (hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

   /* give S same nonzero structure as A */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS)
#endif
   {
      /* Each thread marks strong entries in its row range (weak -> -1),
       * counts survivors, then all threads prefix-sum their counts and
       * compact the marked arrays into the final CSR. */
      HYPRE_Int start, stop;
      hypre_GetSimpleThreadPartition(&start, &stop, num_variables);
      HYPRE_Int jS_diag = 0, jS_offd = 0;

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] = jS_diag;
         if (num_cols_offd)
         {
            S_offd_i[i] = jS_offd;
         }

         diag = A_diag_data[A_diag_i[i]];

         /* compute scaling factor and row sum */
         row_scale = 0.0;
         row_sum = diag;
         if (num_functions > 1)
         {
            /* Only couplings within the same function/DOF block count. */
            if (diag < 0)
            {
               for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            } /* diag >= 0 */
         } /* num_functions > 1 */
         else
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
            else
            {
               for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            } /* diag >= 0*/
         } /* num_functions <= 1 */

         /* Optimistically count all off-diagonal entries; decremented below
          * for each entry marked weak. */
         jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1;
         jS_offd += A_offd_i[i + 1] - A_offd_i[i];

         /* compute row entries of S */
         S_temp_diag_j[A_diag_i[i]] = -1;  /* diagonal never belongs to S */
         if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
         {
            /* make all dependencies weak */
            for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
            {
               S_temp_diag_j[jA] = -1;
            }
            jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1);

            for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
            {
               S_temp_offd_j[jA] = -1;
            }
            jS_offd -= A_offd_i[i + 1] - A_offd_i[i];
         }
         else
         {
            if (num_functions > 1)
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale ||
                         dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale ||
                         dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale ||
                         dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale ||
                         dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions > 1 */
            else
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions <= 1 */
         } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */
      } /* for each variable */

      /* Cross-thread exclusive prefix sum of (jS_diag, jS_offd); also writes
       * the global totals into S_*_i[num_variables]. */
      hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables,
                            prefix_sum_workspace);

      /*--------------------------------------------------------------
       * "Compress" the strength matrix.
       *
       * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
       *
       * NOTE: This "compression" section of code may be removed, and
       * coarsening will still be done correctly.  However, the routine
       * that builds interpolation would have to be modified first.
       *----------------------------------------------------------------*/

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   /* Swap the compacted J arrays in; the temporaries are freed below. */
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;

   hypre_CSRMatrixMemoryLocation(S_diag) = memory_location;
   hypre_CSRMatrixMemoryLocation(S_offd) = memory_location;

   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/* ----------------------------------------------------------------------- */
/* Public entry point: dispatches to the device implementation when A lives
 * in device memory (CUDA/HIP builds), otherwise to the host routine above. */
HYPRE_Int
hypre_BoomerAMGCreateS(hypre_ParCSRMatrix    *A,
                       HYPRE_Real             strength_threshold,
                       HYPRE_Real             max_row_sum,
                       HYPRE_Int              num_functions,
                       HYPRE_Int             *dof_func,
                       hypre_ParCSRMatrix   **S_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("CreateS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCreateSDevice(A, 0, strength_threshold, max_row_sum,
                                          num_functions, dof_func, S_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCreateSHost(A, strength_threshold, max_row_sum,
                                        num_functions, dof_func, S_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/* ----------------------------------------------------------------------- */
/* Create Strength matrix from CF marker array data. Provides a more
   general form to build S for specific nodes of the 'global' matrix
   (for example, F points or A_FF part), given the entire matrix.
   These nodes have the SMRK tag.

   Could possibly be merged with BoomerAMGCreateS() to yield a
   more general function.
*/ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Int *dof_func_offd = NULL; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int num_sends, start, j, index; HYPRE_Int *int_buf_data; HYPRE_Int ierr = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *prefix_sum_workspace; HYPRE_Int my_id; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. 
* * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ hypre_MPI_Comm_rank(comm, &my_id); num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns 
*-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2 * (hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { if (CF_marker[i] == SMRK) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj])) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]])) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { 
row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0*/ } /* num_functions <=1 */ /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { S_temp_offd_j[jA] = -1; } } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* end diag < 0 */ else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions > 1 */ else 
{ if (diag < 0) { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag < 0 */ else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions <=1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* CF_marker == SMRK */ else { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { S_temp_offd_j[jA] = -1; } } /* CF_marker != SMRK */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. 
However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the coarsening and interpolation routines. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). 
Since we are using nonsymmetric storage for A right now, this is not a problem. If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. \end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point $i$ is "strongly connected to" $j$, or "strongly depends on" $j$, if $|a_{ij}| \geq \theta \max_{l \neq j} |a_{il}|$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength matrix, S, the entry $s_{ij}$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_{ij}$ \item $j$ "influences" $i$ with "strength" $s_{ij}$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSabsHost(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. 
* * Absolute "strength" of dependence/influence is defined in * the following way: i depends on j if * abs(aij) > hypre_max (k != i) abs(aik) * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, memory_location); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, memory_location); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, memory_location); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); hypre_CSRMatrixMemoryLocation(S_diag) = memory_location; hypre_CSRMatrixMemoryLocation(S_offd) = memory_location; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, memory_location); S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = 
hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A, S, 0); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = fabs(diag); if (num_functions > 1) { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } } else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */ if ( fabs(row_sum) < fabs(diag) * (2.0 - max_row_sum) && max_row_sum < 1.0 ) { /* make all dependencies weak */ for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { S_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { for (jA = 
A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
*----------------------------------------------------------------*/ /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { if (S_diag_j[jA] > -1) { S_diag_j[jS] = S_diag_j[jA]; jS++; } } } S_diag_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_diag) = jS; /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (S_offd_j[jA] > -1) { S_offd_j[jS] = S_offd_j[jA]; jS++; } } } S_offd_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_offd) = jS; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return (ierr); } HYPRE_Int hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("CreateSabs"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreateSDevice(A, 1, strength_threshold, max_row_sum, num_functions, dof_func, S_ptr); } else #endif { ierr = hypre_BoomerAMGCreateSabsHost(A, strength_threshold, max_row_sum, num_functions, dof_func, S_ptr); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; } /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int **col_offd_S_to_A_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); 
hypre_ParCSRCommPkg *comm_pkg_S; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int *recv_procs_S; HYPRE_Int *recv_vec_starts_S; HYPRE_Int *send_procs_S; HYPRE_Int *send_map_starts_S; HYPRE_Int *send_map_elmts_S = NULL; HYPRE_BigInt *big_send_map_elmts_S = NULL; HYPRE_Int *col_offd_S_to_A; HYPRE_Int *S_marker; HYPRE_Int *send_change; HYPRE_Int *recv_change; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_S; HYPRE_Int i, j, jcol; HYPRE_Int proc, cnt, proc_cnt, total_nz; HYPRE_BigInt first_row; HYPRE_Int ierr = 0; HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int num_sends_S; HYPRE_Int num_recvs_S; HYPRE_Int num_nonzeros; num_nonzeros = S_offd_i[num_variables]; S_marker = NULL; if (num_cols_offd_A) { S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_A; i++) { S_marker[i] = -1; } for (i = 0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_marker[jcol] = 0; } proc = 0; proc_cnt = 0; cnt = 0; num_recvs_S = 0; for (i = 0; i < num_recvs_A; i++) { for (j = recv_vec_starts_A[i]; j < recv_vec_starts_A[i + 1]; j++) { if (!S_marker[j]) { S_marker[j] = cnt; cnt++; proc = 1; } } if (proc) 
{num_recvs_S++; proc = 0;} } num_cols_offd_S = cnt; recv_change = NULL; recv_procs_S = NULL; send_change = NULL; if (col_map_offd_S) { hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST); } col_map_offd_S = NULL; col_offd_S_to_A = NULL; if (num_recvs_A) { recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST); } if (num_sends_A) { send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST); } if (num_recvs_S) { recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST); } recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S + 1, HYPRE_MEMORY_HOST); if (num_cols_offd_S) { col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); } if (num_cols_offd_S < num_cols_offd_A) { for (i = 0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_offd_j[i] = S_marker[jcol]; } proc = 0; proc_cnt = 0; cnt = 0; recv_vec_starts_S[0] = 0; for (i = 0; i < num_recvs_A; i++) { for (j = recv_vec_starts_A[i]; j < recv_vec_starts_A[i + 1]; j++) { if (S_marker[j] != -1) { col_map_offd_S[cnt] = col_map_offd_A[j]; col_offd_S_to_A[cnt++] = j; proc = 1; } } recv_change[i] = j - cnt - recv_vec_starts_A[i] + recv_vec_starts_S[proc_cnt]; if (proc) { recv_procs_S[proc_cnt++] = recv_procs_A[i]; recv_vec_starts_S[proc_cnt] = cnt; proc = 0; } } } else { for (i = 0; i < num_recvs_A; i++) { for (j = recv_vec_starts_A[i]; j < recv_vec_starts_A[i + 1]; j++) { col_map_offd_S[j] = col_map_offd_A[j]; col_offd_S_to_A[j] = j; } recv_procs_S[i] = recv_procs_A[i]; recv_vec_starts_S[i] = recv_vec_starts_A[i]; } recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A + num_recvs_A, HYPRE_MEMORY_HOST); j = 0; for (i = 0; i < num_sends_A; i++) { hypre_MPI_Irecv(&send_change[i], 1, HYPRE_MPI_INT, send_procs_A[i], 0, comm, &requests[j++]); } for (i = 0; i < num_recvs_A; i++) { hypre_MPI_Isend(&recv_change[i], 1, 
HYPRE_MPI_INT, recv_procs_A[i], 0, comm, &requests[j++]); } status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST); hypre_MPI_Waitall(j, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); num_sends_S = 0; total_nz = send_map_starts_A[num_sends_A]; for (i = 0; i < num_sends_A; i++) { if (send_change[i]) { if ((send_map_starts_A[i + 1] - send_map_starts_A[i]) > send_change[i]) { num_sends_S++; } } else { num_sends_S++; } total_nz -= send_change[i]; } send_procs_S = NULL; if (num_sends_S) { send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST); } send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S + 1, HYPRE_MEMORY_HOST); send_map_elmts_S = NULL; if (total_nz) { send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST); } proc = 0; proc_cnt = 0; for (i = 0; i < num_sends_A; i++) { cnt = send_map_starts_A[i + 1] - send_map_starts_A[i] - send_change[i]; if (cnt) { send_procs_S[proc_cnt++] = send_procs_A[i]; send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt - 1] + cnt; } } comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_S) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S; hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S; hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S; hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S; comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S); hypre_ParCSRCommHandleDestroy(comm_handle); first_row = hypre_ParCSRMatrixFirstRowIndex(A); if (first_row) for (i = 0; i < send_map_starts_S[num_sends_S]; i++) { send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i] - first_row); } 
hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S; hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S; hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S; hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(send_change, HYPRE_MEMORY_HOST); hypre_TFree(recv_change, HYPRE_MEMORY_HOST); *col_offd_S_to_A_ptr = col_offd_S_to_A; return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points * for second coarsening pass in aggressive coarsening (S*S+2S) *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreate2ndSHost( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j = NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int 
*S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2 * (hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. 
portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1] - 1; if (my_id == (num_procs - 1)) { global_num_coarse = coarse_row_starts[1]; } hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { num_coarse_private++; } } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; 
big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end + 1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs] + 1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. 
S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow + 1]; k++) { if (CF_marker[S_diag_j[k]] > 0) { index++; } } for (k = S_offd_i[jrow]; k < S_offd_i[jrow + 1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) { index++; } } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, &S_int_i[1], &S_ext_i[1]); } if (num_nonzeros) { S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); } tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i + 1]; j++) { jrow = send_map_elmts[j]; for (k = S_diag_i[jrow]; k < S_diag_i[jrow + 1]; k++) { if (CF_marker[S_diag_j[k]] > 0) { S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]] + my_first_cpt; } } for (k = S_offd_i[jrow]; k < S_offd_i[jrow + 1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) { S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } } tmp_send_map_starts[i + 1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) 
= num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! * evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i = 0; i < recv_vec_starts[num_recvs]; i++) { S_ext_i[i + 1] += S_ext_i[i]; } num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) { S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); } tmp_recv_vec_starts[0] = 0; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i + 1] = S_ext_i[recv_vec_starts[i + 1]]; } hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, S_int_j, S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S + 1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S + 1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16 * hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { 
HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else { S_ext_diag_size_private++; } } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) { S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_big_offd_j[S_ext_offd_size_private++] = big_i1; } //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else { S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0 ; i < S_ext_offd_size; i++) { S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); } //S_ext_offd_j[i] = 
hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, HYPRE_MEMORY_HOST); if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i = 0; i < num_cols_offd_S; i++) { for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) { S_ext_offd_size++; } else { S_ext_diag_size++; } } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S + 1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S + 1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i = 0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) { num_coarse_offd++; } for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_j[cnt_offd++] = big_i1; } else { S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size + num_coarse_offd, HYPRE_MEMORY_HOST); for (i = 0; i < S_ext_offd_size; i++) { temp[i] = S_ext_j[i]; } cnt = S_ext_offd_size; for (i = 0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) { temp[cnt++] = fine_to_coarse_offd[i]; } } if (cnt) { hypre_BigQsort0(temp, 0, cnt - 1); num_cols_offd_C = 1; value = temp[0]; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = 
hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } if (S_ext_offd_size || num_coarse_offd) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } for (i = 0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else { map_S_to_C[i] = -1; } } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) { S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse * hypre_NumThreads(), HYPRE_MEMORY_HOST); } if (num_cols_offd_C) { S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C * hypre_NumThreads(), HYPRE_MEMORY_HOST); } HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse * hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C * hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse * hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C * hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse + 1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse * my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C * my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse * my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C * my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = 
NULL; if (num_coarse) { S_marker = S_marker_array + num_coarse * my_thread_num; } if (num_cols_offd_C) { S_marker_offd = S_marker_offd_array + num_cols_offd_C * my_thread_num; } for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2 + 1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2 + 1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if 
(CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2 + 1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2 + 1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2 + 1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2 + 1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { 
S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2 + 1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2 + 1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j 
= hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if (C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic + 1] == C_diag_i[ic] && C_offd_i[ic + 1] == C_offd_i[ic]) { CF_marker[coarse_to_fine[ic]] = 2; } C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2 * (my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2 * (my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) { CF_marker[coarse_to_fine[ic]] = 2; } } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2 + 1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2 + 1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2 + 1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2 + 1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { 
/*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2 + 1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2 + 1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { 
C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2 + 1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2 + 1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) { hypre_CSRMatrixJ(C_diag) = C_diag_j; } C_offd = hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) { hypre_CSRMatrixJ(C_offd) = C_offd_j; } hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } 
/*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST); } hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST); } if (num_cols_offd_S) { hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); } hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST; *C_ptr = S2; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime(); #endif hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST); return 0; } //----------------------------------------------------------------------- HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("Create2ndS"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(S) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = 
hypre_BoomerAMGCreate2ndSDevice( S, CF_marker, num_paths, coarse_row_starts, C_ptr );
   }
   else
#endif
   {
      /* Host (CPU) path; the only path in non-GPU builds */
      ierr = hypre_BoomerAMGCreate2ndSHost( S, CF_marker, num_paths, coarse_row_starts, C_ptr );
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarkerHost : corrects CF_marker after aggressive
 * coarsening (host implementation).
 *
 * For every point currently marked coarse (CF_marker[i] > 0):
 *   - if it is marked exactly 1, overwrite it with the next entry of
 *     new_CF_marker (consuming one entry);
 *   - otherwise force it to 1 and still consume one new_CF_marker entry,
 *     keeping the two arrays in step.
 * Points with CF_marker[i] <= 0 are left untouched and consume nothing.
 * Always returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarkerHost(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;  /* running index into new_CF_marker's data */
   for (i = 0; i < hypre_IntArraySize(CF_marker); i++)
   {
      if (hypre_IntArrayData(CF_marker)[i] > 0 )
      {
         if (hypre_IntArrayData(CF_marker)[i] == 1)
         {
            hypre_IntArrayData(CF_marker)[i] = hypre_IntArrayData(new_CF_marker)[cnt++];
         }
         else
         {
            /* marker > 1 (e.g. the value 2 set during Create2ndS): reset to
               plain C-point but advance cnt so the arrays stay aligned */
            hypre_IntArrayData(CF_marker)[i] = 1;
            cnt++;
         }
      }
   }
   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker2Host : corrects CF_marker after aggressive
 * coarsening, but marks new F-points (previous C-points) as -2.
 *
 * For every current C-point, consult the next new_CF_marker entry: -1 means
 * the point was demoted (store -2), anything else keeps it a C-point (1).
 * Always returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2Host(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;  /* running index into new_CF_marker's data */
   for (i = 0; i < hypre_IntArraySize(CF_marker); i++)
   {
      if (hypre_IntArrayData(CF_marker)[i] > 0 )
      {
         if (hypre_IntArrayData(new_CF_marker)[cnt] == -1)
         {
            hypre_IntArrayData(CF_marker)[i] = -2;  /* demoted: new F-point */
         }
         else
         {
            hypre_IntArrayData(CF_marker)[i] = 1;   /* stays a C-point */
         }
         cnt++;
      }
   }
   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr.
coarsening *--------------------------------------------------------------------------*/
/* Dispatch wrapper: selects the device or host implementation of the
 * CF-marker correction based on where the two IntArrays live.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("CorrectCFMarker");
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Choose execution policy from the memory locations of both arrays */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_IntArrayMemoryLocation(CF_marker),
                                                      hypre_IntArrayMemoryLocation(new_CF_marker));
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_BoomerAMGCorrectCFMarkerDevice(CF_marker, new_CF_marker);
   }
   else
#endif
   {
      hypre_BoomerAMGCorrectCFMarkerHost(CF_marker, new_CF_marker);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening,
 * but marks new F-points (previous C-points) as -2
 *--------------------------------------------------------------------------*/
/* Dispatch wrapper for the "-2 marks new F-points" variant; same
 * device/host selection logic as hypre_BoomerAMGCorrectCFMarker above.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("CorrectCFMarker2");
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_IntArrayMemoryLocation(CF_marker),
                                                      hypre_IntArrayMemoryLocation(new_CF_marker));
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_BoomerAMGCorrectCFMarker2Device(CF_marker, new_CF_marker);
   }
   else
#endif
   {
      hypre_BoomerAMGCorrectCFMarker2Host(CF_marker, new_CF_marker);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return hypre_error_flag;
}
NETNTLM_bs_fmt_plug.c
/* * NETNTLM_fmt.c -- NTLM Challenge/Response * * Written by JoMo-Kun <jmk at foofus.net> in 2007 * and placed in the public domain. * * Modified for performance, support for Extended Session Security, OMP * and UTF-8, by magnum 2010-2011. * Modified for using Bitsliced DES by Deepika Dutta Mishra * <dipikadutta at gmail.com> in 2013, no rights reserved. * * This algorithm is designed for performing brute-force cracking of the NTLM * (version 1) challenge/response pairs exchanged during network-based * authentication attempts [1]. The captured challenge/response pairs from these * attempts should be stored using the L0phtCrack 2.0 LC format, specifically: * username:unused:unused:lm response:ntlm response:challenge. For example: * * CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1: * C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788 * * It should be noted that a NTLM authentication response is not same as a NTLM * password hash, which can be extracted using tools such as FgDump [2]. NTLM * responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). * * [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * * This version supports Extended Session Security. 
This is what * is used when the "LM" hash ends in 32 zeros: * * DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000: * abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4 * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETNTLM_old; #elif FMT_REGISTERS_H john_register_one(&fmt_NETNTLM_old); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "DES_std.h" #include "DES_bs.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "md5.h" #include "unicode.h" #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "netntlm-naive" #define FORMAT_NAME "NTLMv1 C/R" #define ALGORITHM_NAME "MD4 DES (ESS MD5) " DES_BS_ALGORITHM_NAME " naive" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 24 #define BINARY_ALIGN 4 #define PARTIAL_BINARY_SIZE 8 #define SALT_SIZE 8 #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH 48 #define TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH) #define MIN_KEYS_PER_CRYPT DES_BS_DEPTH #define MAX_KEYS_PER_CRYPT DES_BS_DEPTH static struct fmt_tests tests[] = { {"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} }, {"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"}, {"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"}, {"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"}, {"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"}, {"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"}, {"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"}, 
{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	/* Same vectors again in raw pot-line form (fields supplied to prepare()) */
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "M1xedC4se%^&*@)##(blahblah!@#", {"User", "", "", "lm-hash", "E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	/* ESS case: "LM" field is an 8-byte client challenge padded with zeros */
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};

/* Per-candidate state, sized at init() to max_keys_per_crypt */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; /* candidate passwords */
static int (*saved_len);                          /* their lengths */
static uchar (*output)[PARTIAL_BINARY_SIZE];
static uchar (*saved_key)[21]; // NT hash
static uchar *challenge;      /* current salt (IP-permuted challenge) */
static int keys_prepared;     /* 0 => NT hashes/DES keys need (re)building */

static void set_salt(void *salt);

/* One-time format initialization: set up the bitslice DES engine and
 * allocate the per-candidate buffers above. */
static void init(struct fmt_main *self)
{
	/* LM =2 for DES encryption with no salt and no iterations */
	DES_bs_init(2, DES_bs_cpt);
#if DES_bs_mt
	/* multi-threaded bitslice DES dictates the key-batch sizes */
	self->params.min_keys_per_crypt = DES_bs_min_kpc;
	self->params.max_keys_per_crypt = DES_bs_max_kpc;
#endif
	saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE);
	saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	output = mem_calloc_tiny(sizeof(*output) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE);
}

/* Validate a "$NETNTLM$<challenge>$<response>" ciphertext string.
 * Accepts total lengths 74 (8-byte server challenge) or 90 (16-byte
 * ESS salt). Returns 1 if valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, "$NETNTLM$", 9)!=0) return 0;
	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	/* the '$' separating challenge from response must sit at one of the
	   two allowed offsets */
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;
	/* scan challenge hex digits; atoi16 maps non-hex chars to 0x7F */
	for (pos = &ciphertext[9]; atoi16[ARCH_INDEX(*pos)] != 0x7F;
pos++);
	if (*pos != '$') return 0;
	/* scan response hex digits after the separator */
	for (pos++;atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	/* must end exactly after CIPHERTEXT_LENGTH response digits, for either
	   challenge length (26 = 9 tag + 16 hex + '$', 42 = 9 + 32 + '$') */
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	              (pos - ciphertext - 42 == CIPHERTEXT_LENGTH)))
		return 1;
	else
		return 0;
}

/* Build a canonical "$NETNTLM$..." string from raw pot-line fields:
 * split_fields[3] = "LM" field, [4] = NTLM response, [5] = challenge.
 * Returns the canonical string (allocated via str_alloc_copy) on success,
 * otherwise returns split_fields[1] unchanged. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17];

	if (!strncmp(split_fields[1], "$NETNTLM$", 9))
		return split_fields[1];          /* already canonical */
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
		return split_fields[1];

	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(split_fields[3]) == 48 && !strncmp(&split_fields[3][16],
		"00000000000000000000000000000000", 32)) {
		memcpy(clientChal, split_fields[3],16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;

	/* 9 for the tag, +1 for '$', +1 for NUL */
	cp = mem_alloc(9+strlen(split_fields[5])+strlen(clientChal)+1+strlen(split_fields[4])+1);
	sprintf(cp, "$NETNTLM$%s%s$%s", split_fields[5], clientChal, split_fields[4]);

	if (valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

/* Canonicalize for the pot file: lowercase everything after the tag.
 * NOTE: lowercasing starts at index 8, the tag's final '$' — strlwr
 * leaves '$' unchanged, so the effect equals starting at index 9. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];

	memset(out, 0, TOTAL_LENGTH + 1);
	strcpy(out, ciphertext);
	strlwr(&out[8]); /* Exclude: $NETNTLM$ */
	return out;
}

/* Convert a 24-byte DES ciphertext into the bitslice engine's layout:
 * expand each 8-byte third into two 32-bit words bit-by-bit, then apply
 * DES initial permutation (IP) to each pair and swap the halves. */
static ARCH_WORD_32 *generate_des_format(uchar* binary)
{
	static ARCH_WORD_32 out[6];
	ARCH_WORD block[6];
	int chr, src,dst,i;
	uchar value, mask;
	ARCH_WORD *ptr;

	memset(block, 0, sizeof(block));

	/* spread the 24 input bytes MSB-first into the 6 words of block[] */
	for (chr = 0; chr < 24; chr=chr + 8)
	{
		dst = 0;
		for(i=0; i<8; i++)
		{
			value = binary[chr + i];
			mask = 0x80;

			for (src = 0; src < 8; src++) {
				if (value & mask)
					block[(chr/4) + (dst>>5)]|= 1 << (dst & 0x1F);
				mask >>= 1;
				dst++;
			}
		}
	}

	/* Apply initial permutation on ciphertext blocks */
	for(i=0; i<6; i=i+2)
	{
		ptr =
DES_do_IP(&block[i]); out[i] = ptr[1]; out[i+1] = ptr[0]; } return out; } static void *get_binary(char *ciphertext) { uchar binary[BINARY_SIZE]; int i; ARCH_WORD_32 *ptr; ciphertext = strrchr(ciphertext, '$') + 1; for (i=0; i<BINARY_SIZE; i++) { binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } /* Set binary in DES format */ ptr = generate_des_format(binary); return ptr; } static inline void setup_des_key(unsigned char key_56[], int index) { char key[8]; /* Right shift key bytes by 1 to bring in openssl format */ /* Each byte of key is xored with 0x80 to pass check for 0 in DES_bs_set_key() */ key[0] = (key_56[0] >> 1) | 0x80; key[1] = (((key_56[0] << 7) | (key_56[1] >> 1)) >>1) | 0x80; key[2] = (((key_56[1] << 6) | (key_56[2] >> 2)) >>1) | 0x80; key[3] = (((key_56[2] << 5) | (key_56[3] >> 3)) >>1) | 0x80; key[4] = (((key_56[3] << 4) | (key_56[4] >> 4)) >>1) | 0x80; key[5] = (((key_56[4] << 3) | (key_56[5] >> 5)) >>1) | 0x80; key[6] = (((key_56[5] << 2) | (key_56[6] >> 6)) >>1) | 0x80; key[7] = ((key_56[6] << 1) >>1 ) | 0x80; DES_bs_set_key((char*)key, index); } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int i; if (!keys_prepared) { #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { int len; /* Generate 16-byte NTLM hash */ len = E_md4hash((uchar *) saved_plain[i], saved_len[i], saved_key[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation /* NULL-padding the 16-byte hash to 21-bytes is made in cmp_exact if needed */ setup_des_key(saved_key[i], i); } keys_prepared = 1; } /* Bitsliced des encryption */ DES_bs_crypt_plain(count); return count; } static int cmp_all(void *binary, int count) { return DES_bs_cmp_all((ARCH_WORD_32 *)binary, count); } static int cmp_one(void *binary, int index) { return DES_bs_cmp_one((ARCH_WORD_32 *)binary, 32, index); } static int cmp_exact(char *source, int index) { ARCH_WORD_32 *binary; /* NULL-pad 
16-byte NTLM hash to 21-bytes (postponed until now) */ memset(&saved_key[index][16], 0, 5); binary = get_binary(source); if (!DES_bs_cmp_one(binary, 64, index)) { setup_des_key(saved_key[0], 0); return 0; } setup_des_key(&saved_key[index][7], 0); DES_bs_crypt_plain(1); binary = get_binary(source); if (!DES_bs_cmp_one(&binary[2], 64, 0)) { setup_des_key(saved_key[0], 0); return 0; } setup_des_key(&saved_key[index][14], 0); DES_bs_crypt_plain(1); binary = get_binary(source); if (!DES_bs_cmp_one(&binary[4], 64, 0)) { setup_des_key(saved_key[0], 0); return 0; } setup_des_key(saved_key[0], 0); return 1; } static void *get_salt(char *ciphertext) { static uchar *binary_salt; int i, cnt,j; unsigned char temp[SALT_SIZE]; if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); if (ciphertext[25] == '$') { // Server challenge ciphertext += 9; for (i = 0; i < SALT_SIZE; ++i) binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; } else { uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE]; MD5_CTX ctx; ciphertext += 9; // Extended Session Security, // Concatenate Server & Client challenges for (i = 0;i < 2 * SALT_SIZE; ++i) es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; // MD5 the concatenated challenges, result is our key MD5_Init(&ctx); MD5_Update(&ctx, es_salt, 16); MD5_Final((void*)k1, &ctx); memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it } /* Apply IP to salt */ memset(temp, 0, SALT_SIZE); for (i = 0; i < 64; i++) { cnt = DES_IP[i ^ 0x20]; j = (uchar)((binary_salt[cnt >> 3] >> (7 - (cnt & 7))) & 1); temp[i/8] |= j << (7 - (i % 8)); } memcpy(binary_salt, temp, SALT_SIZE); return (void*)binary_salt; } static void set_salt(void *salt) { challenge = salt; DES_bs_generate_plaintext(challenge); } static void netntlm_set_key(char *key, int index) { saved_len[index] = strlen(key); memcpy(saved_plain[index], key, saved_len[index]+1); keys_prepared = 0; } static 
char *get_key(int index) { return saved_plain[index]; } static int salt_hash(void *salt) { return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_NETNTLM_old = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if DES_BS FMT_BS | #if DES_bs_mt FMT_OMP | #endif #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, netntlm_set_key, get_key, fmt_default_clear_keys, crypt_all, { DES_bs_get_hash_0, DES_bs_get_hash_1, DES_bs_get_hash_2, DES_bs_get_hash_3, DES_bs_get_hash_4, DES_bs_get_hash_5, DES_bs_get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
simd8.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -O3 -fdump-tree-vect-details" } */
/* Every loop below carries either an "if (0)" clause or "simdlen (1)",
   each of which forbids SIMD vectorization of the annotated loop, so the
   vectorizer dump must report "vectorized 0 loops" once per function. */
/* { dg-final { scan-tree-dump-times "vectorized 0 loops in function" 4 "vect" } } */

int a[1024];

/* if (0): the simd construct's condition is false — no vectorization. */
void
foo (void)
{
  #pragma omp simd if (0)
  for (int i = 0; i < 1024; ++i)
    a[i] = a[i] + 1;
}

/* if (0) wins even when safelen/simdlen would otherwise permit SIMD. */
void
bar (void)
{
  #pragma omp simd if (0) safelen (256) simdlen (8)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}

/* simdlen (1): at most one lane, i.e. effectively scalar. */
void
baz (void)
{
  #pragma omp simd safelen (256) simdlen (1)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}

/* simdlen (1) restricts the loop even with a true if (1) clause. */
void
qux (void)
{
  #pragma omp simd simdlen (1) if (1)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageBoundingBox() returns the bounding box of an image canvas.
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o bounds: Method GetImageBoundingBox returns the bounding box of an
%      image canvas.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-edge fraction of non-background pixels (0.0 .. 1.0) for the four
  image borders; also reused as per-edge trim counters in
  GetEdgeBoundingBox() (doubles cast to ssize_t offsets there).
*/
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;

/*
  GetEdgeBackgroundCensus(): crop a width x height strip anchored by
  `gravity` (offset by x_offset/y_offset) and return the fraction of its
  pixels that differ (fuzzily) from the background color.  The background
  is sampled from the corner pixel matching the gravity, unless overridden
  by the "background" or "trim:background-color" artifacts.
*/
static double GetEdgeBackgroundCensus(const Image *image,
  const CacheView *image_view,const GravityType gravity,const size_t width,
  const size_t height,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    census;

  Image
    *edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  const Quantum
    *p;

  ssize_t
    y;

  /*
    Determine the percent of image background for this edge.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  GetPixelInfoPixel(image,p,&background);
  /* Artifact overrides: "trim:background-color" wins over "background". */
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  artifact=GetImageArtifact(image,"trim:background-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  /* Zero width/height are expanded to full-column/row extents here. */
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  census=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      GetPixelInfoPixel(edge_image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        census++;
      p+=GetPixelChannels(edge_image);
    }
  }
  /* Normalize count to a fraction of the strip's pixel total. */
  census/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(census);
}

/*
  GetMinEdgeBackgroundCensus(): smallest of the four per-edge census
  values — the next edge eligible for trimming.
*/
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double
    census;

  census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
    edge->bottom);
  return(census);
}

/*
  GetEdgeBoundingBox(): iteratively shave edges whose non-background
  fraction stays below the "trim:percent-background" threshold; the
  surviving rectangle is the bounding box.
*/
static RectangleInfo GetEdgeBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_census,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  /* Work on a clone with a reset page so offsets start at the origin. */
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  (void) memset(&vertex,0,sizeof(vertex));
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  /* Initial census for each full-length border strip. */
  edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  /* Convert to the minimum non-background fraction an edge must reach
     to stop trimming; clamped to (MagickEpsilon, 1.0]. */
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  background_census=GetMinEdgeBackgroundCensus(&edge);
  for ( ; background_census < percent_background;
          background_census=GetMinEdgeBackgroundCensus(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_census) < MagickEpsilon)
      {
        /*
          Trim left edge: advance the left cursor and re-census the three
          edges whose strips changed.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_census) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_census) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_census) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  /* Trimmed-off left/top counts become the box origin. */
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}

/*
  GetImageBoundingBox(): classic corner-color trim.  Seeds the box from
  corner colors, scans every row in parallel shrinking/expanding a local
  candidate, then merges candidates under a critical section.  Delegates to
  GetEdgeBoundingBox() when "trim:percent-background" is set; "trim:edges"
  selects which edges participate.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[4],
    zero;

  RectangleInfo
    bounds;

  const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  artifact=GetImageArtifact(image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    return(GetEdgeBoundingBox(image,exception));
  artifact=GetImageArtifact(image, "trim:edges");
  if (artifact == (const char *) NULL)
    {
      /*
        Sentinel seed: x/y start past the far corner and width/height at 0
        (or 1 for degenerate single-column/row images) so the scan below can
        only improve them.
      */
      bounds.width=image->columns == 1 ? 1 : 0;
      bounds.height=image->rows == 1 ? 1 : 0;
      bounds.x=(ssize_t) image->columns;
      bounds.y=(ssize_t) image->rows;
    }
  else
    {
      char
        *edges,
        *p,
        *q;

      /* Only the listed edges get the sentinel; others stay fixed. */
      bounds.width=(size_t) image->columns;
      bounds.height=(size_t) image->rows;
      bounds.x=0;
      bounds.y=0;
      edges=AcquireString(artifact);
      q=edges;
      while ((p=StringToken(",",&q)) != (char *) NULL)
      {
        if (LocaleCompare(p,"north") == 0)
          bounds.y=(ssize_t) image->rows;
        if (LocaleCompare(p,"east") == 0)
          bounds.width=0;
        if (LocaleCompare(p,"south") == 0)
          bounds.height=0;
        if (LocaleCompare(p,"west") == 0)
          bounds.x=(ssize_t) image->columns;
      }
      edges=DestroyString(edges);
    }
  /* Sample the four corner colors as trim targets. */
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,p,&target[0]);
  GetPixelInfo(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[1]);
  GetPixelInfo(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
    image->rows-1,1,1,exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[3]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Snapshot the shared bounds under the same critical section used
       for the merge below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    /* bounding_box.x/.y track min foreground coordinates; .width/.height
       temporarily hold max coordinates, fixed up after the loop. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      if ((x < (ssize_t) bounding_box.width) &&
          (y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
        {
          bounding_box.width=(size_t) x;
          bounding_box.height=(size_t) y;
        }
      p+=GetPixelChannels(image);
    }
    /* Merge this row's candidate into the shared result. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* Convert max coordinates into extents relative to the origin. */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C o n v e x H u l l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageConvexHull() returns the convex hull points of an image canvas.
%
%  The format of the GetImageConvexHull method is:
%
%      PointInfo *GetImageConvexHull(const Image *image,
%        size_t number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the convex hull.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  LexicographicalOrder(): despite the name, this is the standard 2-D
  cross-product orientation test for the triple (a,b,c): positive for a
  counter-clockwise turn, negative for clockwise, zero when collinear.
  Used by TraceConvexHull() to discard non-convex chain points.
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x));
}

/*
  GetEdgeBackgroundColor(): pick the background color as the corner/edge
  color that fuzzily matches the most pixels along its border strip; the
  "convex-hull:background-color" (or "background") artifact overrides the
  sampled colors.
*/
static PixelInfo GetEdgeBackgroundColor(const Image *image,
  const CacheView *image_view,ExceptionInfo *exception)
{
  const char
    *artifact;

  double
    census[4],
    edge_census;

  PixelInfo
    background[4],
    edge_background;

  ssize_t
    i;

  /*
    Most dominant color of edges/corners is the background color of the image.
  */
  artifact=GetImageArtifact(image,"convex-hull:background-color");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 4; i++)
  {
    CacheView
      *edge_view;

    GravityType
      gravity;

    Image
      *edge_image;

    PixelInfo
      pixel;

    RectangleInfo
      edge_geometry;

    const Quantum
      *p;

    ssize_t
      y;

    census[i]=0.0;
    (void) memset(&edge_geometry,0,sizeof(edge_geometry));
    /* i = 0..3: west, east, north, south edge strip respectively. */
    switch (i)
    {
      case 0:
      default:
      {
        p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
          exception);
        gravity=WestGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 1:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
          exception);
        gravity=EastGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 2:
      {
        p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
        gravity=NorthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
      case 3:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
          (ssize_t) image->rows-1,1,1,exception);
        gravity=SouthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
    }
    GetPixelInfoPixel(image,p,background+i);
    if (artifact != (const char *) NULL)
      (void) QueryColorCompliance(artifact,AllCompliance,background+i,
        exception);
    GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
    edge_image=CropImage(image,&edge_geometry,exception);
    if (edge_image == (Image *) NULL)
      continue;
    edge_view=AcquireVirtualCacheView(edge_image,exception);
    /* Count pixels along the strip that match this candidate color.
       NOTE(review): the match sense here increments on MagickFalse, i.e.
       counts NON-matching pixels — the candidate with the most "votes"
       wins below; confirm intended semantics against upstream. */
    for (y=0; y < (ssize_t) edge_image->rows; y++)
    {
      ssize_t
        x;

      p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) edge_image->columns; x++)
      {
        GetPixelInfoPixel(edge_image,p,&pixel);
        if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
          census[i]++;
        p+=GetPixelChannels(edge_image);
      }
    }
    edge_view=DestroyCacheView(edge_view);
    edge_image=DestroyImage(edge_image);
  }
  /* Return the candidate with the highest census. */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}

/*
  TraceConvexHull(): Andrew's monotone-chain convex hull.  `vertices` must
  already be sorted (they are emitted in row-major scan order by the
  caller).  On return *monotone_chain holds pointers to the hull vertices
  and *chain_length their count.
*/
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **chain;

  ssize_t
    i;

  size_t
    demark,
    n;

  /*
    Construct the upper and lower hulls: rightmost to leftmost counterclockwise.
  */
  chain=(*monotone_chain);
  n=0;
  for (i=0; i < (ssize_t) number_vertices; i++)
  {
    /* Pop points that would make a non-left turn. */
    while ((n >= 2) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  /* Second pass (reverse order) builds the opposite hull; `demark`
     protects the first hull's points from being popped. */
  demark=n+1;
  for (i=(ssize_t) number_vertices-2; i >= 0; i--)
  {
    while ((n >= demark) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  *chain_length=n;
}

/*
  GetImageConvexHull(): collect every non-background pixel coordinate as a
  candidate vertex, then trace the convex hull of those points.  Returns a
  heap-allocated PointInfo array (caller frees) or NULL on allocation
  failure; *number_vertices receives the hull size.
*/
MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MemoryInfo
    *monotone_info,
    *vertices_info;

  PixelInfo
    background;

  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;

  size_t
    n;

  ssize_t
    y;

  /*
    Identify convex hull vertices of image foreground object(s).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  /* Worst case: every pixel is a vertex; the chain may hold 2x that. */
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_info=AcquireVirtualMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_info == (MemoryInfo *) NULL))
    {
      if (monotone_info != (MemoryInfo *) NULL)
        monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  /* Emit foreground coordinates in scan order (already sorted for the
     monotone-chain pass). */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_info=RelinquishVirtualMemory(monotone_info);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* One depth accumulator per potential OpenMP thread; merged at the end. */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        Palette image without alpha: only the colormap entries need to be
        examined (this loop runs serially; GetOpenMPThreadId() selects the
        accumulator slot).
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /* Raise the candidate depth until R, G and B are all exactly
           representable at that depth. */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        minimal depth for every representable quantum value, then the scan
        becomes a table lookup per channel sample.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        /* Smallest depth at which value i survives a round trip. */
        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        /* `status` doubles as an early-out: once any thread reaches the
           maximum possible depth there is nothing left to learn. */
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
          }
          p+=GetPixelChannels(image);
        }
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth the slow way (HDRI or deep quantum builds): per
    sample, raise the candidate depth until the value round-trips.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Merge the per-thread accumulators: the answer is the maximum. */
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M i n i m u m B o u n d i n g B o x                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMinimumBoundingBox() returns the points that form the minimum
%  bounding box around the image foreground objects with the "Rotating
%  Calipers" algorithm.  The method also returns these properties:
%  minimum-bounding-box:area, minimum-bounding-box:width,
%  minimum-bounding-box:height, and minimum-bounding-box:angle.
%
%  The format of the GetImageMinimumBoundingBox method is:
%
%      PointInfo *GetImageMinimumBoundingBox(Image *image,
%        size_t *number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the bounding box.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Working state for one step of the rotating-calipers sweep: the candidate
  box geometry (area/width/height and the projection of the far corner) plus
  the hull vertex indices (p,q,v) that produced it.
*/
typedef struct _CaliperInfo
{
  double
    area,
    width,
    height,
    projection;

  ssize_t
    p,
    q,
    v;
} CaliperInfo;

static inline double getAngle(PointInfo *p,PointInfo *q)
{
  /*
    Get the angle between line (p,q) and horizontal axis, in degrees.
  */
  return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}

static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    distance;

  /*
    Squared Euclidean distance between p and q (the square of the hypot).
  */
  distance=hypot(p->x-q->x,p->y-q->y);
  return(distance*distance);
}

static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Projection of vector (x,y) - p into a line passing through p and q.
    Returns INFINITY when p and q coincide (degenerate edge).
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}

static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Distance from a point (x,y) to a line passing through p and q.
    Signed (cross-product based); callers take fabs() where needed.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}

MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  /* The result is always a quadrilateral: 4 corner points. */
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /* Seed the best-so-far box with an area no candidate can exceed. */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  /*
    For each hull edge (i,i+1): find the hull vertex farthest from that edge
    (the box height for this orientation), then project all vertices onto the
    edge direction to get the box width; keep the minimum-area candidate.
  */
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      double
        diameter;

      /*
        NOTE(review): despite the name, min_diameter tracks the MAXIMUM
        edge-to-vertex distance over j (the update fires whenever the
        candidate exceeds the running value).
      */
      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    if (caliper_info.area > area)
      {
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  /* Walk the four corners from the winning edge orientation. */
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double
      d = hypot(bounding_box[i].x,bounding_box[i].y);

    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  /* Optional artifact snaps the reported angle to landscape/portrait. */
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      /* Translate the box so its min corner sits at the origin. */
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        /*
          NOTE(review): intercept mixes the y of vertex i+1 with the x of
          vertex i; confirm against upstream whether both should use the
          same vertex.
        */
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /* Round up to the next legal quantum depth; values > 64 pass unchanged. */
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    Classify the image from its colorspace, gray/monochrome/palette tests,
    and alpha trait; the pixels themselves are not modified.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    classification;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Trust a previously-established gray/bi-level classification. */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Assume bi-level until a pixel proves otherwise; a non-gray pixel makes
    the whole classification undefined.
  */
  classification=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *q;

    ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,q) == MagickFalse)
        {
          classification=UndefinedType;
          break;
        }
      if ((classification == BilevelType) &&
          (IsPixelMonochrome(image,q) == MagickFalse))
        classification=GrayscaleType;
      q+=GetPixelChannels(image);
    }
    if (classification == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((classification == GrayscaleType) &&
      (image->alpha_trait != UndefinedPixelTrait))
    classification=GrayscaleAlphaType;
  return(classification);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A declared bi-level image needs no pixel scan. */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Scan until the first non-monochrome pixel disproves the claim. */
  monochrome=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (monochrome != MagickFalse); y++)
  {
    const Quantum
      *q;

    ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,q) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(monochrome);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%    (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } type=IdentifyImageGray(image,exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleAlphaType)) return(type); if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. 
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /* Purely a test of the cached type tag; no pixel data is read. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Returns MagickTrue immediately if the alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  ssize_t
    y;

  /*
    Determine if image is opaque: scan until a translucent pixel is found.
    A failed row fetch is treated as non-opaque, matching the original
    early-exit behavior.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (opaque != MagickFalse); y++)
  {
    const Quantum
      *q;

    ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,q) != OpaqueAlpha)
        {
          opaque=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A depth at or above the build-time quantum depth needs no pixel
    requantization: just record it.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Quantize the colormap entries of a palette image; pixel indexes are
        requantized by the per-pixel loops below as well.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
        Precompute quantum -> requantized-quantum for every possible value,
        then apply it as a table lookup per channel sample.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        /* A prior row failure short-circuits remaining iterations. */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      /* image->depth is only updated when every row was requantized. */
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
    General path (HDRI or large quantum): requantize each sample directly.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel        Grayscale       GrayscaleMatte
%        Palette        PaletteMatte    TrueColor
%        TrueColorMatte ColorSeparation ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  /* Honor the image's dither setting, and let the "dither" artifact
     override it. */
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /*
        Gray, normalize, threshold at mid-range, then quantize to a 256-entry
        gray palette.  NOTE(review): status is overwritten by each step, so
        only the final QuantizeImage result is observed.
      */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      status=BilevelImage(image,QuantumRange/2.0,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=256;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      /* Materialize an opaque alpha channel if none exists yet. */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* Only quantize when the image is not already a small palette. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* Threshold only the alpha channel, restoring the mask afterwards. */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  /* The type tag is only recorded when the conversion pipeline succeeded. */
  if (status == MagickFalse)
    return(status);
  image->type=type;
  return(MagickTrue);
}
ops.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #pragma once #ifndef OPS_H_ #define OPS_H_ #include <op_boilerplate.h> #include <array/DataTypeUtils.h> #include <helpers/shape.h> #include <vector> #include <Environment.h> #include <loops/summarystatsreduce.h> #include <loops/ReduceType.h> #define MIN_V 1e-12 #define MAX_FLOAT 1e37 #define MIN_FLOAT 1e-37 #define MAX_INT 2147483647 #define MIN_CUTFOFF -3.79297773665f #define FLOAT_MIN_NORMAL 1.17549435e-38 #define EPS 1e-5 #define AFFINITY close #define DOUBLE_PI_T T(2.0 * 3.14159265358979323846) #define DOUBLE_PI_X X(2.0 * 3.14159265358979323846) #define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong 
*tadOffsets) {} #define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #ifdef __CUDACC__ #define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, 
Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #else // hacky fix for isnan/being being out of scope //#ifdef IOS //#define isinf(x) 0 // this isn't right. 
But std::isinf fails //#define isnan(x) 0 //#else //#define isnan std::isnan //#define isinf std::isinf //#endif #define no_op_exec_special_cuda #define no_op_exec_special_accumulation_cuda #define no_op_exec_special_accumulation_same_cuda #define no_op_exec_special_accumulation_long_cuda #define no_op_exec_special_any_cuda #define no_op_exec_special_bool_cuda #define no_op_exec_special_same_cuda #define no_op_exec_special_accumulation_same_cuda #endif #define SELU_ALPHA 1.6732632423543772848170429916717 #define SELU_LAMBDA 1.0507009873554804934193349852946 namespace functions { namespace indexreduce { template <typename T> struct IndexValue { T value; Nd4jLong index; _CUDA_HD IndexValue() = default; _CUDA_HD IndexValue(const T val, const Nd4jLong ind): index(ind), value(val) {} }; } namespace summarystats { template <typename T> class SummaryStatsData; } } namespace simdOps { template <typename X, typename Y, typename Z> class Add { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 + d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 + d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 + params[0]); } op_def static X startingValue() { return static_cast<X>(0.f); } }; template <typename X, typename Y> class NewAdd { public: op_def static X op(X d1, Y d2, X *params) { return d1 + d2; } }; template <typename X, typename Y, typename Z> class Subtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 - params[0]); } }; template <typename X, typename Y, typename Z> class SquaredSubtract { public: op_def static Z op(X d1, Y d2) { auto d = static_cast<Z>(d1 - d2); return d * d; } op_def static Z op(X 
d1, Y d2, Z *params) { auto d = static_cast<Z>(d1 - d2); return d * d; } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { auto d = static_cast<Z>(d1 - params[0]); return d * d; } }; template <typename X, typename Y, typename Z> class SquaredReverseSubtract { public: op_def static Z op(X d1, Y d2) { auto d = static_cast<Z>(d2 - d1); return d * d; } op_def static Z op(X d1, Y d2, Z *params) { auto d = static_cast<Z>(d2 - d1); return d * d; } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { auto d = static_cast<Z>(params[0] - d1); return d * d; } }; template <typename X, typename Y, typename Z> class ReverseSubtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] - d1); } }; template <typename X, typename Y, typename Z> class LogPoissonLossFull { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z) { auto zz = static_cast<Z>(z); return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)); } // op for MetaOps op_def static X op(X z, Y *params) { return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + 
static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z))); } }; template <typename X, typename Y, typename Z> class LogPoissonLoss { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z) { return static_cast<Z>(z); } // op for MetaOps op_def static Z op(X z, Y *params) { return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0])); } }; template <typename X, typename Y, typename Z> class Multiply { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 * params[0]); } op_def static X startingValue() { return static_cast<X>(1.f); } }; template <typename X, typename Y, typename Z> class Divide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 / params[0]); } op_def static X startingValue() { return static_cast<X>(1); } }; template <typename X, typename Y, typename Z> class DivideNoNan { public: op_def static Z op(X d1, Y d2) { if (d2 == (Y)0) return (Z)0; return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { if (d2 == (Y)0) return (Z)0; return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { if (params[0] == (Y)0) return (Z)0; return static_cast<Z>(d1 / params[0]); } op_def 
static X startingValue() { return static_cast<X>(1); } }; template <typename X, typename Y, typename Z> class SafeDivide { public: op_def static Z op(X d1, Y d2) { if(d2 == static_cast<Y>(0)) return static_cast<Z>(0); return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { if(d2 == static_cast<Y>(0)) return static_cast<Z>(0); return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { if(params[0] == static_cast<Y>(0)) return static_cast<Z>(0); return static_cast<Z>(d1 / params[0]); } }; template <typename X, typename Y, typename Z> class FloorDiv { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); } op_def static Z op(X d1) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1)); } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0])); } }; template <typename X, typename Y, typename Z> class TruncateDiv { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 / i2); } op_def static Z op(X d1, Y d2, Z *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 / i2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<Z>(i1 / i2); } }; template <typename X, typename Y, typename Z> class TruncateMod { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 % i2); } op_def static Z op(X d1, Y d2, Z *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 % i2); } op_def 
static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<Z>(i1 % i2); } }; template<typename X, typename Y, typename Z> class Remainder { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FMod { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FloorMod { public: op_def static Z op(X d1, Y d2) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1, Y d2, Z *params) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? 
m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return op(d1, params[0]); } }; template <typename X, typename Y, typename Z> class ReverseDivide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] / d1); } }; template <typename X, typename Y, typename Z> class CopyPws { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X> class Copy { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1; } }; template <typename X, typename Y, typename Z> class Copy2 { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X, typename Y, typename Z> class Axpy { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 + d1); } op_def static Z op(X d1, Y d2, Z *params) { auto alpha = params[0]; return alpha * static_cast<Z>(d1) + static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class Assign { public: no_op_exec_special_any no_op_exec_special_any_cuda op_def static Z op(X d1, X *params) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class And { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z 
op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, X *params) { return static_cast<Z>(119); } }; template <typename X> class IntOr { public: op_def static X op(X d1, X d2) { return d2 | d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class IntAnd { public: op_def static X op(X d1, X d2) { return d2 & d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class IntXor { public: op_def static X op(X d1, X d2) { return d2 ^ d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class ShiftLeft { public: op_def static X op(X d1, X d2) { return d1 << d2; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class ShiftRight { public: op_def static X op(X d1, X d2) { return d1 >> d2; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class CyclicShiftLeft { public: op_def static X op(X d1, X d2) { return d1 << d2 | d1 >> ((sizeof(X) * 8) - d2); } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class CyclicShiftRight { public: op_def static X op(X d1, X d2) { return d1 >> d2 | d1 << ((sizeof(X) * 8) - d2); } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X, typename Z> class Or { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return d1 != comp || d2 != comp ? 
static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, X *params) { return static_cast<Z>(119); } }; template <typename X, typename Z> class Xor { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Z> class Not { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return static_cast<Z>(0); } op_def static Z op(X d1, X d2, X *params) { return d1 != d2 ? 
static_cast<Z>(1) : static_cast<Z>(0); } // this transform op should run only on boolean input op_def static Z op(X d1, X *params) { auto b1 = static_cast<bool>(d1); return !b1; } }; template <typename X, typename Y, typename Z> class LogicalNot { public: op_def static Z op(X d1, Y d2) { return !((int) d1 && (int) d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2))); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<X>(119); } }; template <typename X, typename Y, typename Z> class LogicalXor { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return (i1 | i2) &~ (i1 & i2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); } }; template <typename X, typename Y, typename Z> class LogicalAnd { public: op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) & static_cast<int>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(Y d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); } }; template <typename X, typename Y, typename Z> class LogicalOr { public: op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) | static_cast<int>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<X>(119); } }; template <typename X, typename Y, typename Z> class Mod { public: /* // just a optional note, feel free to remove later op_def static half op(half d1, half d2, half *params) { return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr)); } */ op_def static Z op(X d1, Y d2) { return 
static_cast<int>(d1) % static_cast<int>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } // op for MetaOp op_def static Z op(X d1, Y *params) { return op(d1, params[0]); } }; template <typename X, typename Y, typename Z> class ReverseMod { public: op_def static Z op(X d1, Y d2) { return static_cast<int>(d2) % static_cast<int>(d1); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } // op for MetaOp op_def static Z op(X d1, Y *params) { return op(d1, params[0]); } }; /** * Whether 2 elements in an array * are epsilion equal */ template <typename X, typename Z> class Epsilon { public: op_def static Z op(X d1, X d2) { X diff = d1 - d2; X absDiff = nd4j::math::nd4j_abs<X>(diff); if (absDiff <= static_cast<X>(MIN_V)) return static_cast<Z>(1); return static_cast<Z>(0); } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class EqualTo { public: op_def static Z op(X d1, X d2) { return d1 == d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class NotEqualTo { public: op_def static Z op(X d1, X d2) { return d1 != d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class GreaterThanOrEqual { public: op_def static Z op(X d1, X d2) { return d1 >= d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } // FIXME: this signature clashes with MetaOp stuff op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class GreaterThan { public: op_def static Z op(X d1, X d2) { return d1 > d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } // FIXME: this signature clashes with MetaOp stuff op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename 
Z> class LessThan { public: op_def static Z op(X d1, X d2) { return d1 < d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class LessThanOrEqual { public: op_def static Z op(X d1, X d2) { return d1 <= d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X> class Abs { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_abs<X>(d1); } }; template <typename X> class Ceiling { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_ceil<X,X>(d1); } }; template <typename X> class Cosine { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_cos<X,X>(d1); } }; template <typename X> class Exp { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_exp<X, X>(d1); } }; template <typename X> class HardTanhDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? 
static_cast<X>(1.f) : static_cast<X>(0.f)); } }; template <typename X> class HardTanh { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 < static_cast<X>(-1)) return static_cast<X>(-1); else if (d1 > static_cast<X>(1)) return static_cast<X>(1); else return d1; } }; template <typename X> class Floor { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_floor<X,X>(d1); } }; template <typename X> class Log { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(d1); } }; template <typename X> class Log1p { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(1 + d1); } }; template <typename X, typename Y, typename Z> class LogX { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ; } }; template <typename X> class StabilizeFP16 { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 <= static_cast<X>(0)) return static_cast<X>(nd4j::DataTypeUtils::min<float16>()); else return d1; } }; template <typename X> class StabilizeX { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 <= static_cast<X>(0)) return nd4j::DataTypeUtils::min<X>(); else return d1; } }; template <typename X> class SpecialDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * (static_cast<X>(1.f) - d1); } }; template <typename X> class Neg { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return -d1; } }; template <typename X> class Erf { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X 
*params) { return nd4j::math::nd4j_erf<X,X>(d1); } }; template <typename X> class Erfc { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_erfc<X,X>(d1); } }; template <typename X> class Reciprocal { public: no_op_exec_special_same no_op_exec_special_same_cuda // op_def static T op(T d1) { // return (T(1.0f) / d1); // } // op for MetaOps op_def static X op(X d1, X *params) { return (static_cast<X>(1) / d1); } }; template <typename X, typename Z> class Sqr { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); } op_def static Z op(X d1) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); } }; template <typename X, typename Y, typename Z> class RelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_re<X>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class BinaryRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { X threshold = params[0]; return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class BinaryMinimumAbsoluteRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, X *params) { X d2 = params[0]; X thresholdRelative = params[1]; X thresholdAbsolute = params[2]; return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? 
static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0); } op_def static Z op(X d1, Y d2, Z *params) { X thresholdRelative = params[0]; X thresholdAbsolute = params[1]; return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class ReversePow { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class Pow { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class PowDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f)); } op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class IGamma { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, 
Z *params) { return nd4j::math::nd4j_igamma<X, X, Z>(d1, params[0]); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_igamma<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_igamma<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class IGammac { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_igammac<X, X, Z>(d1, params[0]); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_igammac<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_igammac<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } }; template <typename X> class Round { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_round<X,X>(d1); } }; template <typename X, typename Z> class IsNan { public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isnan(d1) ? 
static_cast<X>(1) : static_cast<X>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X> class Expm1 { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1); } }; template <typename X, typename Z> class IsPositive { public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return d1 > (X)0.f; } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class IsInf { public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isinf<X>(d1) ? 
static_cast<Z>(1) : static_cast<Z>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class IsInfOrNan{ public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction != static_cast<X>(0); } }; template <typename X, typename Z> class IsFinite { public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(1); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? 
static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction != static_cast<X>(0); } }; template <typename X> class ClipByValue { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 > params[1]) return params[1]; if (d1 < params[0]) return params[0]; return d1; } }; template <typename X, typename Y, typename Z> class LstmClip { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { X _v = (X) d2; if (d1 > _v) return _v; else if (d1 < -_v) return -_v; else return d1; } }; template <typename X> class Swish { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1); } }; template <typename X> class Mish { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_tanh<X,X>(nd4j::math::nd4j_softplus<X,X>(d1)); } }; template <typename X> class MishDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { auto ex = nd4j::math::nd4j_exp<X,X>(d1); auto e2x = ex * ex; auto e3x = ex * ex * ex; return (ex * (4 * (d1 + 1) + 4 * e2x + e3x + ex *(4 * d1 + 6))) / nd4j::math::nd4j_pow<X, X, X>((2 * ex + e2x + 2), (X) 2.f); } }; template <typename X> class GELU { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1); } }; template <typename X> class PreciseGELU { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI)); auto xp = d1 + nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(0.044715) * d1, static_cast<X>(3)); return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + 
nd4j::math::nd4j_tanh<X, X>(sp * xp)); } }; template <typename X> class GELUDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { auto x17 = static_cast<X>(1.702f) * d1; auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17); // (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2 return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2); } }; template <typename X> class PreciseGELUDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { auto x79 = static_cast<X>(0.797885) * d1; auto x03 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0356774) * d1, 3); auto x39 = static_cast<X>(0.398942) * d1; auto x05 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0535161) * d1, 3); auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03); // 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3] return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03); } }; template <typename X> class SwishDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1); return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f)); } }; template <typename X> class LogSigmoid { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1)); } }; template <typename X> class LogSigmoidDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1); return static_cast<X>(1.f) / (ex + static_cast<X>(1.f)); } }; template <typename X> 
// Standard logistic sigmoid (template header is on the previous line).
class Sigmoid {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_sigmoid<X, X>(d1);
            }
        };

        // Affine transform: params[0] * x + params[1].
        template <typename X>
        class Affine {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return params[0] * d1 + params[1];
            }
        };

        // Derivative of sigmoid, delegated to the math helper.
        template <typename X>
        class SigmoidDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
            }
        };

        // Hard sigmoid: clamp(0.2*x + 0.5, 0, 1).
        template <typename X>
        class HardSigmoid {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
            }
        };

        // Piecewise-constant derivative of hard sigmoid: 0.2 inside (-2.5, 2.5), else 0.
        template <typename X>
        class HardSigmoidDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
            }
        };

        /**
         * Scale to be between a min and max (params[0] = min, params[1] = max).
         * Values already inside the range pass through unchanged; for the [0,1]
         * range a sigmoid squash is applied first.
         */
        template <typename X>
        class SetRange {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                auto min = params[0];
                auto max = params[1];
                if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
                    return d1;
                if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
                    auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
                    return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
                }
                return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
            }
        };

        // sin(x).
        template <typename X>
        class Sin {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_sin<X,X>(d1);
            }
        };

        // x^2.
        template <typename X>
        class Square {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 * d1;
            }
        };

        // sqrt(x), with distinct input/output types.
        template <typename X, typename Z>
        class Sqrt {
        public:
            no_op_exec_special
            no_op_exec_special_cuda

            op_def static Z op(X d1, Z *params) {
                return nd4j::math::nd4j_sqrt<X, Z>(d1);
            }
        };

        // Reciprocal square root: 1 / sqrt(x).
        template <typename X, typename Z>
        class RSqrt {
        public:
            no_op_exec_special
            no_op_exec_special_cuda

            op_def static Z op(X d1, Z *params) {
                return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
            }
        };

        // Round to nearest integer value (rint semantics from the math helper).
        template <typename X>
        class Rint {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_rint<X,X>(d1);
            }
        };

        // softplus(x) = log(1 + e^x), delegated to the math helper.
        template <typename X>
        class SoftPlus {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_softplus<X, X>(d1);
            }
        };

        // sign(x) via boolean arithmetic: (x>0) - (x<0) in {-1, 0, 1}.
        template <typename X>
        class Sign {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
            }
        };

        // x * (1 - x) — continues on the next line.
        template <typename X>
        class TimesOneMinus {
        public:
            no_op_exec_special_same
no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 * (static_cast<X>(1) - d1);
            }
        };

        // Rational (polynomial) approximation of scaled tanh: 1.7159 * tanh-like(2x/3).
        template <typename X>
        class RationalTanh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                // keep 2/3 as runtime variable, to match precision
                auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;

                auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
                return static_cast<X>(1.7159f) * tanh;
            }
        };

        // Derivative of RationalTanh via the quotient rule on the same polynomial.
        template <typename X>
        class RationalTanhDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;

                auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));

                auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);

                return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
            }
        };

        // tanh(x).
        template <typename X>
        class Tanh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_tanh<X, X>(d1);
            }
        };

        // params[0] * tanh(params[1] * x).
        template <typename X>
        class ScaledTanh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return params[0] * nd4j::math::nd4j_tanh<X, X>(params[1] * d1);
            }
        };

        // max(0, tanh(x)).
        template <typename X>
        class RectifiedTanh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
            }
        };

        // Derivative of RectifiedTanh: tanh'(x) for x > 0, else 0.
        template <typename X>
        class RectifiedTanhDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
            }
        };

        // atanh(x).
        template <typename X>
        class ATanh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_atanh<X,X>(d1);
            }
        };

        // tanh'(x), delegated to the math helper.
        template <typename X>
        class TanhDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_tanhderivative<X,X>(d1);
            }
        };

        // x^3.
        template <typename X>
        class Cube {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 * d1 * d1;
            }
        };

        // d/dx x^3 = 3x^2.
        template <typename X>
        class CubeDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return static_cast<X>(3) * d1 * d1;
            }
        };

        // acos(x).
        template <typename X>
        class ACos {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_acos<X, X>(d1);
            }
        };

        // asinh(x).
        template <typename X>
        class ASinh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_asinh<X, X>(d1);
            }
        };

        // d/dx asinh(x) = 1 / sqrt(x^2 + 1).
        template <typename X>
        class ASinhDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
            }
        };

        // acosh(x).
        template <typename X>
        class ACosh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_acosh<X, X>(d1);
            }
        };

        // d/dx acosh(x) = 1 / (sqrt(x-1) * sqrt(x+1)) — body continues on the next line.
        template <typename X>
        class ACoshDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
                return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
            }
        };

        // Constant 1 for every input.
        template <typename X>
        class Ones {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return static_cast<X>(1.0f);
            }
        };

        // softsign(x) = x / (1 + |x|), delegated to the math helper.
        template <typename X>
        class SoftSign {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_softsign<X, X>(d1);
            }
        };

        // Derivative of softsign, delegated to the math helper.
        template <typename X>
        class SoftSignDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_softsignderivative<X,X>(d1);
            }
        };

        // Boolean comparison against extraParams[0] with tolerance extraParams[1];
        // the comparison mode is selected by extraParams[2] (see case labels).
        template <typename X, typename Z>
        class MatchConditionBool {
        public:
            no_op_exec_special_bool
            no_op_exec_special_bool_cuda

            // this op return 1.0 if condition met, 0.0 otherwise
            op_def static Z op(X d1, X *extraParams) {
                X compare = extraParams[0];
                X eps = extraParams[1];

                auto mode = static_cast<int>(extraParams[2]);
                //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);

                switch (mode) {
                    case 0: // equals
                        return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
                    case 1: // not equals
                        return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
                    case 2: // less_than
                        return d1 < compare ? true : false;
                    case 3: // greater_than
                        return d1 > compare ? true : false;
                    case 4: // less_or_equals_than
                        return d1 <= compare ? true : false;
                    case 5: // greater_or_equals_than
                        return d1 >= compare ? true : false;
                    case 6: // abs_less_than
                        return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
                    case 7: // abs_greater_than
                        return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
                    case 8: // is inf
                        return nd4j::math::nd4j_isinf(d1) ? true : false;
                    case 9: // is nan
                        return nd4j::math::nd4j_isnan(d1) ? true : false;
                    case 10: // exact equality (no eps)
                        return (d1 == compare) ? true : false;
                    case 11: // exact inequality (no eps)
                        return (d1 != compare) ? true : false;
                    case 12: // abs_greater_or_equals_than
                        return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
                    case 13: // abs_less_or_equals_than
                        return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
                    case 14: // isFinite
                        return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
                    case 15: // isInfinite
                        // NOTE(review): this also returns true for NaN (mirrors !isFinite), not strictly "is infinite"
                        return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
                    default:
                        printf("Undefined match condition: [%i]\n", mode);
                }
                // unknown mode: input is passed through unchanged
                return d1;
            }
        };

        // Counting reduction: emits 1 per element matching the condition, summed by merge/update.
        template <typename X, typename Z>
        class MatchCondition {
        public:
            no_op_exec_special
            no_op_exec_special_cuda
            no_op_exec_special_accumulation_long
            no_op_exec_special_accumulation_cuda

            op_def static Z startingValue(const X *input) {
                return static_cast<Z>(0);
            }

            op_def static Z merge(Z old, Z opOutput, X *extraParams) {
                return old + opOutput;
            }

            op_def static Z update(Z old, Z opOutput, X *extraParams) {
                return old + opOutput;
            }

            // Core comparison; mode labels match MatchConditionBool above.
            op_def static Z op(X d1, X compare, X eps, int mode) {
                switch (mode) {
                    case 0: // equals
                        return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
                    case 1: // not equals
                        return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
                    case 2: // less_than
                        return d1 < compare ? 1 : 0;
                    case 3: // greater_than
                        return d1 > compare ? 1 : 0;
                    case 4: // less_or_equals_than
                        return d1 <= compare ? 1 : 0;
                    case 5: // greater_or_equals_than
                        return d1 >= compare ? 1 : 0;
                    case 6: // abs_less_than
                        return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
                    case 7: // abs_greater_than
                        return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
                    case 8: // is inf
                        return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
                    case 9: // is nan
                        return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
                    case 10: // exact equality (no eps)
                        return (d1 == compare) ? 1 : 0;
                    case 11: // exact inequality (no eps)
                        return (d1 != compare) ? 1 : 0;
                    case 12: // abs_greater_or_equals_than
                        return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
                    case 13: // abs_less_or_equals_than
                        return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
                    case 14: // isFinite
                        return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
                    case 15: // isInfinite
                        return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
                    default:
                        printf("Undefined match condition: [%i]\n", mode);
                }
                // unknown mode: input is passed through unchanged
                return d1;
            }

            // this op return 1.0 if condition met, 0.0 otherwise
            // NOTE(review): here mode is extraParams[0] and eps extraParams[1] — the
            // reverse layout of the overload below; confirm against callers.
            op_def static Z op(X d1, X compare, X *extraParams) {
                X eps = extraParams[1];

                auto mode = static_cast<int>(extraParams[0]);

                return op(d1, compare, eps, mode);
            }

            // this op return 1.0 if condition met, 0.0 otherwise
            op_def static Z op(X d1, X *extraParams) {
                X compare = extraParams[0];
                X eps = extraParams[1];

                auto mode = static_cast<int>(extraParams[2]);

                return op(d1, compare, eps, mode);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // ELU with alpha = d2, delegated to the math helper.
        // NOTE(review): uses the *_same exec macros despite three template params — matches upstream.
        template <typename X, typename Y, typename Z>
        class ELU {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                return nd4j::math::nd4j_elu<X,Z>(d1, static_cast<X>(d2));
            }
        };

        // Derivative of ELU with alpha = d2.
        template <typename X, typename Y, typename Z>
        class ELUDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                return nd4j::math::nd4j_eluderivative<X,Z>(d1, static_cast<X>(d2));
            }
        };

        // ReLU with threshold d2: max(d1, d2) after casting both to Z.
        template <typename X, typename Y, typename Z>
        class RELU {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                auto xt = static_cast<Z>(d1);
                auto xf = static_cast<Z>(d2);
                return xt < xf ? xf : xt;
            }
        };

        // Label smoothing for sigmoid cross-entropy: d1*(1-d2) + 0.5*d2.
        template <typename X, typename Y, typename Z>
        class SXELogitsSmoother {
        public:
            op_def static Z op(X d1, Y d2, Z *params) {
                return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
            }
        };

        // ReLU6: RELU clipped at 6 — upper clip continues on the next line.
        template <typename X, typename Y, typename Z>
        class RELU6 {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
                return relu < static_cast<Z>(6) ?
relu : static_cast<Z>(6);
            }
        };

        // Leaky ReLU: alpha * x for x < 0, else x (alpha = d2).
        template <typename X, typename Y, typename Z>
        class LeakyRELU {
        public:
            no_op_exec_special
            no_op_exec_special_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                auto val = static_cast<Z>(d1);
                auto alpha = static_cast<Z>(d2);
                return val < 0.0f ? alpha * val : val;
            }
        };

        // SELU using the SELU_LAMBDA / SELU_ALPHA constants defined elsewhere in this file.
        template <typename X>
        class SELU {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
            }
        };

        // Derivative of SELU: lambda for x > 0, else alpha*lambda*e^x.
        template <typename X>
        class SELUDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
            }
        };

        // Derivative of leaky ReLU: 1 for x >= 0, else alpha (d2).
        template <typename X, typename Y, typename Z>
        class LeakyRELUDerivative {
        public:
            no_op_exec_special
            no_op_exec_special_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                if (d1 >= static_cast<X>(0))
                    return static_cast<Z>(1);
                else
                    return static_cast<Z>(d2);
            }
        };

        // asin(x).
        template <typename X>
        class ASin {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_asin<X,X>(d1);
            }
        };

        // sinh(x).
        template <typename X>
        class Sinh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_sinh<X,X>(d1);
            }
        };

        // d/dx sinh(x) = cosh(x).
        template <typename X>
        class SinhDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_cosh<X, X>(d1);
            }
        };

        // cosh(x).
        template <typename X>
        class Cosh {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_cosh<X,X>(d1);
            }
        };

        // tan(x).
        template <typename X>
        class Tan {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_tan<X,X>(d1);
            }
        };

        // d/dx tan(x) = 1 / cos(x)^2.
        template <typename X>
        class TanDerivative {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
            }
        };

        // atan(x).
        template <typename X>
        class ATan {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return nd4j::math::nd4j_atan<X, X>(d1);
            }
        };

        // atan2 — NOTE(review): forwards as atan2(d2, d1), i.e. the second operand
        // is the y argument; confirm intended operand order against callers.
        template <typename X, typename Y, typename Z>
        class Atan2 {
        public:
            no_op_exec_special
            no_op_exec_special_cuda

            op_def static Z op(X d1, Y d2) {
                return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
            }

            op_def static Z op(X d1, Y d2, Z *params) {
                return op(d1, d2);
            }

            // op for MetaOps
            op_def static Z op(X d1, Y *params) {
                return op(d1, params[0]);
            }
        };

        // Identity: pass-through.
        template <typename X>
        class Identity {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return d1;
            }
        };

        // Numeric stabilization against the MIN_CUTFOFF constant (spelling matches
        // the project macro), scaled by params[0].
        template <typename X>
        class Stabilize {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                X k = params[0];
                if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
                    return static_cast<X>(- MIN_CUTFOFF) / k;
                else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
                    return static_cast<X>(MIN_CUTFOFF) / k;
                return d1;
            }
        };

        // Heaviside step against threshold d2 — result continues on the next line.
        template <typename X, typename Y, typename Z>
        class Step {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static Z op(X d1, Y d2, Z *params) {
                return (d1 > static_cast<X>(d2) ?
static_cast<Z>(1) : static_cast<Z>(0));
            }
        };

        // 1 - x.
        template <typename X>
        class OneMinus {
        public:
            no_op_exec_special_same
            no_op_exec_special_same_cuda

            op_def static X op(X d1, X *params) {
                return static_cast<X>(1) - d1;
            }
        };

        // Sum reduction: identity 0, merge/update accumulate by addition.
        template <typename X>
        class Sum {
        public:
            no_op_exec_special_accumulation_same
            no_op_exec_special_accumulation_same_cuda

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0.0f);
            }

            op_def static X merge(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static X update(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static X op(X d1, X *extraParams) {
                return d1;
            }

            op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // Synthetic benchmark reduction: heavy transcendental mix per element, summed.
        template <typename X>
        class ReduceSameBenchmarkOp {
        public:
            no_op_exec_special_accumulation_same
            no_op_exec_special_accumulation_same_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0.0f);
            }

            op_def static X merge(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static X update(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static X op(X d1, X *extraParams) {
                auto f1 = static_cast<float>(d1);
                return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                       + nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
                       / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                       * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                       - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
            }

            op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // Shannon entropy reduction over p = x^2: result is -sum(p * log(p)).
        template <typename X, typename Z>
        class ShannonEntropy {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                auto p = d1 * d1;
                return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                return -reduction;
            }
        };

        // Log-entropy reduction: log of the entropy value.
        template <typename X, typename Z>
        class LogEntropy {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                //entropy is -sum(p(x) * log(p(x))); log entropy is log of this
                return nd4j::math::nd4j_log<Z, Z>(-reduction);
            }
        };

        // Entropy reduction: -sum(p(x) * log(p(x))).
        template <typename X, typename Z>
        class Entropy {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
            }
        };

        // Absolute-value sum reduction (L1 accumulation).
        template <typename X>
        class ASum {
        public:
            no_op_exec_special_accumulation_same
            no_op_exec_special_accumulation_same_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static X merge(X old, X opOutput, X *extraParams) {
                return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
            }

            op_def static X update(X old, X opOutput, X *extraParams) {
                return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
            }

            op_def static X op(X d1, X *extraParams) {
                return nd4j::math::nd4j_abs<X>(d1);
            }

            op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return nd4j::math::nd4j_abs<X>(reduction);
            }
        };

        // Count of non-zero elements (long-typed accumulation).
        template <typename X, typename Z>
        class CountNonZero {
        public:
            no_op_exec_special_accumulation_long
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

            op_def static Z startingValue(const X *input) {
                return static_cast<Z>(0);
            }

            op_def static Z merge(Z old, Z opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, X *extraParams) {
                return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // Count of zero elements — per-element test continues on the next line.
        template <typename X, typename Z>
        class CountZero {
        public:
            no_op_exec_special_accumulation_long
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static Z startingValue(const X *input) {
                return static_cast<Z>(0.0f);
            }

            op_def static Z merge(Z old, Z opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, X *extraParams) {
                return d1 == static_cast<X>(0) ?
static_cast<X>(1) : static_cast<X>(0); // NOTE(review): casts to X though the declared return type is Z — implicit conversion at return; confirm intent
            }

            op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return static_cast<Z>(reduction);
            }
        };

        // Product reduction: identity 1, merge/update multiply.
        template <typename X>
        class Prod {
        public:
            no_op_exec_special_accumulation_same
            no_op_exec_special_accumulation_same_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(1);
            }

            op_def static X merge(X old, X opOutput, X *extraParams) {
                return opOutput * old;
            }

            op_def static X update(X old, X opOutput, X *extraParams) {
                return opOutput * old;
            }

            op_def static X op(X d1, X *extraParams) {
                return d1;
            }

            op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // "Any" reduction: 1 if the summed values exceed 0, else 0.
        template <typename X, typename Z>
        class Any {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0.0f);
            }

            op_def static Z merge(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(X old, X opOutput, X *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, X *extraParams) {
                return d1;
            }

            op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
            }
        };

        // "All" reduction: product accumulation; 1 only if every element was non-zero.
        template <typename X, typename Z>
        class All {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(1);
            }

            op_def static Z merge(X old, X opOutput, X *extraParams) {
                return opOutput * old;
            }

            op_def static Z update(X old, X opOutput, X *extraParams) {
                return opOutput * old;
            }

            op_def static Z op(X d1, X *extraParams) {
                return d1;
            }

            op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
            }
        };

        // Arithmetic mean: sum divided by element count in postProcess.
        template <typename X, typename Z>
        class Mean {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                return d1;
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                return reduction / (Z) n;
            }
        };

        // Synthetic benchmark float reduction: heavy transcendental mix, averaged.
        template <typename X, typename Z>
        class ReduceFloatBenchmarkOp {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                auto f1 = static_cast<float>(d1);
                return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                       + nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
                       / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                       * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                       - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                return (Z) reduction / (Z) n;
            }
        };

        // Mean of absolute values.
        // NOTE(review): merge applies nd4j_abs<X> to Z-typed arguments (and update does
        // not apply abs at all) — asymmetric with ASum; confirm against upstream.
        template <typename X, typename Z>
        class AMean {
        public:
            no_op_exec_special_accumulation
            no_op_exec_special_accumulation_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::SUM;

            op_def static X startingValue(const X *input) {
                return static_cast<X>(0);
            }

            op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
                return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
            }

            op_def static Z update(Z old, Z opOutput, Z *extraParams) {
                return opOutput + old;
            }

            op_def static Z op(X d1, Z *extraParams) {
                return nd4j::math::nd4j_abs<X>(d1);
            }

            op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
                return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
            }
        };

        // Max reduction: identity is -inf (or the type's lowest value).
        template <typename X>
        class Max {
        public:
            no_op_exec_special_accumulation_same
            no_op_exec_special_accumulation_same_cuda

            const static functions::ReduceType reduceType = functions::ReduceType::MAX;

            op_def static X startingValue(const X *input) {
                return -nd4j::DataTypeUtils::infOrMax<X>();
            }

            op_def static X merge(X old, X opOutput, X *extraParams) {
                return nd4j::math::nd4j_max<X>(old, opOutput);
            }

            op_def static X update(X old, X opOutput, X *extraParams) {
                return nd4j::math::nd4j_max<X>(opOutput, old);
            }

            op_def static X op(X d1, X d2, X *params) {
                return nd4j::math::nd4j_max<X>(d1, d2);
            }

            op_def static X op(X d1, X d2) {
                return nd4j::math::nd4j_max<X>(d1, d2);
            }

            // FIXME: this signature overlaps with MetaOp
            op_def static X op(X d1, X *extraParams) {
                return d1;
            }

            op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
                return reduction;
            }
        };

        // Pairwise max-by-magnitude — body continues on the next line.
        template <typename X, typename Y, typename Z>
        class AMaxPairwise {
        public:
            op_def
static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1, Y d2) { auto z1 = static_cast<Z>(d1); auto z2 = static_cast<Z>(d2); if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2)) return z1; else return z2; } }; template <typename X, typename Y, typename Z> class AMinPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1, Y d2) { auto z1 = static_cast<Z>(d1); auto z2 = static_cast<Z>(d2); if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2)) return z1; else return z2; } }; template <typename X, typename Y, typename Z> class MaxPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } }; template <typename X, typename Y, typename Z> class MinPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } }; template <typename X> class AMax { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda const static functions::ReduceType reduceType = functions::ReduceType::AMAX; op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput)); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old)); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } op_def static X op(X d1, X d2) { return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? 
d1 : d2; } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return nd4j::math::nd4j_abs<X>(d1); } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return nd4j::math::nd4j_abs<X>(reduction); } }; template <typename X> class AMin { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda const static functions::ReduceType reduceType = functions::ReduceType::AMIN; op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput)); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old)); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } op_def static X op(X d1, X d2) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return nd4j::math::nd4j_abs<X>(d1); } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return nd4j::math::nd4j_abs<X>(reduction); } }; template <typename X> class Min { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda const static functions::ReduceType reduceType = functions::ReduceType::MIN; op_def static X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(old, opOutput); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(opOutput, old); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_min<X>(d1, d2); } op_def static X op(X d1, X d2) { return 
nd4j::math::nd4j_min<X>(d1, d2); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return d1; } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class Norm1 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1)); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return reduction; } }; template <typename X, typename Z> class Norm2 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<Z, Z>(reduction); } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1 * d1); } }; template <typename X, typename Z> class SquaredNorm { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z 
// ---- tail of SquaredNorm (class opened in the previous chunk): per-element
// ---- square, identity postProcess (no sqrt), close the class.
*extraParams) { return static_cast<Z>(d1 * d1); }

op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return reduction; }
};

// NormFrobenius: Frobenius norm — sum of squared magnitudes, sqrt at the end.
// Same accumulation as Norm2 except |d1| is taken before squaring.
template <typename X, typename Z>
class NormFrobenius {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) { return static_cast<X>(0); }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; }

    op_def static Z op(X d1, Z *extraParams) {
        X v = nd4j::math::nd4j_abs<X>(d1);
        return static_cast<Z>(v * v);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<Z, Z>(reduction); }
};

// NormP: general p-norm — sum of |d1|^p, then the 1/p power at the end.
// The exponent p is supplied by the caller in extraParams[0].
// NOTE(review): no guard against p == 0 (division by zero in postProcess) —
// presumably callers validate p; confirm upstream.
template <typename X, typename Z>
class NormP {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) { return static_cast<X>(0); }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; }

    op_def static Z op(X d1, Z *extraParams) { return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]); }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]); }
};

// NormMax: max-norm (infinity norm) — largest absolute value in the buffer.
// NOTE(review): merge() combines partial results by ADDITION while update()
// combines by max(|old|, |opOutput|), and reduceType is declared SUM.
// Summing two partial maxima overestimates the max-norm whenever more than
// one partial is nonzero; this is only correct if the execution engine never
// merges two nonzero partials — confirm against the reduce launchers before
// changing.
// NOTE(review): postProcess computes max(|reduction|, |reduction|) — both
// arguments are identical, so it is effectively just |reduction|.
template <typename X, typename Z>
class NormMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) { return static_cast<X>(0); }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) { return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old), nd4j::math::nd4j_abs<Z>(opOutput)); }

    // Per-element pass-through; |.| is applied by update/postProcess.
    op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1); }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(reduction), nd4j::math::nd4j_abs<Z>(reduction)); }
};

// Variance: sample variance. The precomputed mean is passed in via
// extraParams[0]; op() accumulates squared deviations and postProcess
// divides by (n - 1), i.e. the bias-corrected estimator.
template <typename X, typename Z>
class Variance {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) { return static_cast<X>(0.0f); }

    op_def static Z merge(X old, X opOutput, Z *extraParams) { return old + opOutput; }

    op_def static Z update(X old, X opOutput, Z *extraParams) { return old + opOutput; }

    op_def static X op(X d1, Z *extraParams) {
        X mean = static_cast<X>(extraParams[0]);
        X ret = d1 - mean;
        return ret * ret;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        // T bias = extraParams[1];
        // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
        return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
    }
};

/**
 * Standard deviation of a buffer
 *
 * Same accumulation as Variance; postProcess delegates to
 * Variance<X,Z>::postProcess and takes the square root of the result.
 */
template <typename X, typename Z>
class StandardDeviation {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) { return static_cast<X>(0.0f); }

    op_def static Z merge(X old, X opOutput, Z *extraParams) { return old + opOutput; }

    op_def static Z update(X old, X opOutput, Z *extraParams) { return old + opOutput; }

    op_def static Z op(X d1, Z *extraParams) {
        X mean = extraParams[0];
        X ret = d1 - mean;
        return ret * ret;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
        Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
        return sqrtRet;
    }
};
template <typename X, typename Y> class CosineSimilarity { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(d1 * d1); extraParams[1] += static_cast<Y>(d2 * d2); return static_cast<Y>(d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2)); return static_cast<Y>(d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class JaccardDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<X>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { // num / denom return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]); } op_def static Y num(X d1, X d2) { return nd4j::math::nd4j_min<X>(d1, d2); } op_def static Y denom(X d1, X d2) { return 
nd4j::math::nd4j_max<X>(d1, d2); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(num(d1, d2)); extraParams[1] += static_cast<Y>(denom(d1, d2)); return static_cast<Y>(0.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2)); return static_cast<Y>(0.0f); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class SimpleHammingDistance { public: static const int extraParamsLen = 0; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return static_cast<Y>(reduction / n); } op_def static Y op(X d1, X d2, Y *extraParams) { return (d1 == d2) ? 
static_cast<Y>(0.0f) : static_cast<Y>(1.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { return op(d1, d2, extraParams); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class CosineDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]))); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)); extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)); return (d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2)); return (d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; /** * Dot product between 2 
arrays */ template <typename X, typename Y> class Dot { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op //delete[] * extraParamsRef; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return static_cast<Y>(d1 * d2); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; /** * Op to check equality within arrays */ template <typename X, typename Z> class EqualsWithEps { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Z startingValue(const X *input) { return static_cast<Z>(0.0f); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) { return reduction; } op_def static Z op(X d1, X d2, Z *extraParamsRef) { double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]); return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps)); } #ifdef __CUDACC__ __device__ static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) { return opOutput + old; } op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {} }; template <typename X, 
typename Y> class EuclideanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return nd4j::math::nd4j_sqrt<Y, Y>(reduction); } op_def static Y op(X d1, X d2, Y *extraParamsRef) { X ret = d1 - d2; return static_cast<Y>(ret * ret); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; template <typename X, typename Y> class ManhattanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return nd4j::math::nd4j_abs<X>(d1 - d2); } op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return old + opOutput; } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif #ifndef __clang__ #pragma omp declare simd uniform(extraParamsRef) #endif op_def static Y merge(X old, X opOutput, X *extraParamsRef) { return update(old, opOutput, extraParamsRef); } }; template <typename X, typename Z> class IndexAbsoluteMax { public: 
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return nd4j::math::nd4j_abs<X>(val); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value > old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 0; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class FirstIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X 
*extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index > opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index > f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class LastIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index < opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { 
return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index < f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class IndexMax { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value > old.value) { return opOutput; } #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value > f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 
-nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexAbsoluteMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline 
functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value < f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsVariance { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { Z ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) 
return static_cast<Z>(val.variance()); return ret; } return static_cast<Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsStandardDeviation { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { auto ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); else return nd4j::math::nd4j_sqrt<double, Z>(ret); } return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X> class DropOut { public: no_op_exec_special_same no_op_exec_special_same_cuda inline _CUDA_D static X op(X d1, X *params) { X prob = params[0]; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= prob ? static_cast<X>(0.0f) : d1; } }; template <typename X, typename Y, typename Z> class DropOutInverted { public: no_op_exec_special no_op_exec_special_cuda #ifdef __CUDACC__ __device__ #endif inline static Z op(X d1, Y d2, Z *params) { Y prob = d2; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= static_cast<X>(prob) ? 
static_cast<Z>(0.0f) : reinterpret_cast<Z>(d1 / static_cast<X>(prob)); } }; template <typename X, typename Y, typename Z> class ReplaceNans { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ; } }; // this op is used for conditional pairwise transforms only template <typename X, typename Y, typename Z> class CompareAndReplace{ public: // op definition for PairWise Transform op_def static Z op(X d1, Y d2, Z *params) { auto zd1 = static_cast<Z>(d1); auto zd2 = static_cast<Z>(d2); auto compare = params[0]; auto eps = params[2]; int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps) return zd2; else return zd1; else if (mode == 1) // not equals eps if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps) return zd2; else return zd1; else if (mode == 2) // less_than eps if (zd1 < compare) return zd2; else return zd1; else if (mode ==3) // greater_than if (zd1 > compare) return zd2; else return zd1; else if (mode == 4) // less_or_equals_than if (zd1 <= compare) return zd2; else return zd1; else if (mode == 5) // greater_or_equals_than if (zd1 >= compare) return zd2; else return zd1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(zd1) < compare) return zd2; else return zd1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(zd1) > compare) return zd2; else return zd1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(zd1)) return zd2; else return zd1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(zd1)) return zd2; else return zd1; else if (mode == 10) if (zd1 == compare) return zd2; else return zd1; else if (mode == 11) if (zd1 != compare) return zd2; else return zd1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(zd1) >= compare) return zd2; else return zd1; else if (mode == 13) // abs_less_or_equals_than if 
(nd4j::math::nd4j_abs<Z>(zd1) <= compare) return zd2; else return zd1; else printf("Undefined boolean operation: [%i]\n", mode); return zd1; } }; template <typename X, typename Y, typename Z> class CompareAndSet { public: // op definition for PairWise Transform op_def static Z op(X dX, Y dY, Z *params) { auto d1 = static_cast<Z>(dX); auto d2 = static_cast<Z>(dY); auto compare = params[0]; auto eps = params[2]; auto mode = static_cast<int>(params[3]); if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps) return d2; else return d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps) return d2; else return d1; else if (mode == 2) // less_than if (d2 < compare) return d2; else return d1; else if (mode ==3) // greater_than if (d2 > compare) return d2; else return d1; else if (mode == 4) // less_or_equals_than if (d2 <= compare) return d2; else return d1; else if (mode == 5) // greater_or_equals_than if (d2 >= compare) return d2; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(d2) < compare) return d2; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(d2) > compare) return d2; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d2)) return d2; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d2)) return d2; else return d1; else if (mode == 10) if (d2 == compare) return d2; else return d1; else if (mode == 11) if (d2 != compare) return d2; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) >= compare) return d2; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) <= compare) return d2; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; template <typename X> class CompareAndSetTransform { public: no_op_exec_special_same no_op_exec_special_same_cuda // op definition for Transform 
op_def static X op(X d1, X *params) { auto compare = params[0]; auto set = params[1]; auto eps = params[2]; // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1; else if (mode == 2) // less_than if (d1 < compare) return set; else return d1; else if (mode ==3) // greater_than if (d1 > compare) return set; else return d1; else if (mode == 4) // less_or_equals_than if (d1 <= compare) return set; else return d1; else if (mode == 5) // greater_or_equals_than if (d1 >= compare) return set; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<X>(d1) < compare) return set; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<X>(d1) > compare) return set; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d1)) return set; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d1)) return set; else return d1; else if (mode == 10) if (d1 == compare) return set; else return d1; else if (mode == 11) if (d1 != compare) return set; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) >= compare) return set; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) <= compare) return set; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; } #endif
/* decorate.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE % % D D E C O O R R A A T E % % D D EEE C O O RRRR AAAAA T EEE % % D D E C O O R R A A T E % % DDDD EEEEE CCCC OOO R R A A T EEEEE % % % % % % MagickCore Image Decoration Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* Define declarations. 
*/ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B o r d e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BorderImage() surrounds the image with a border of the color defined by % the bordercolor member of the image structure. The width and height % of the border are defined by the corresponding members of the border_info % structure. % % The format of the BorderImage method is: % % Image *BorderImage(const Image *image,const RectangleInfo *border_info, % const CompositeOperator compose,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o border_info: define the width and height of the border. % % o compose: the composite operator. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  Image
    *border_image,
    *clone_image;

  FrameInfo
    frame_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    Translate the border geometry into an equivalent flat frame: the border
    width/height become symmetric margins on both sides and both bevels are
    zero, so FrameImage() paints a plain solid border.
  */
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  /*
    FrameImage() draws the frame in the clone's matte color, so temporarily
    substitute the requested border color; the original matte color is
    restored on the result below.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  clone_image->matte_color=image->border_color;
  border_image=FrameImage(clone_image,&frame_info,compose,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image != (Image *) NULL)
    border_image->matte_color=image->matte_color;
  return(border_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F r a m e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FrameImage() adds a simulated three-dimensional border around the image.
%  The color of the border is defined by the matte_color member of image.
%  Members width and height of frame_info specify the border width of the
%  vertical and horizontal sides of the frame.  Members inner and outer
%  indicate the width of the inner and outer shadows of the frame.
%
%  The format of the FrameImage method is:
%
%      Image *FrameImage(const Image *image,const FrameInfo *frame_info,
%        const CompositeOperator compose,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o frame_info: Define the width and height of the frame and its bevels.
%
%    o compose: the composite operator.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag  "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    accentuate,
    highlight,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /*
    NOTE(review): the (ssize_t) cast binds to frame_info->width only; the
    subtraction of the size_t bevel_width is performed in unsigned arithmetic
    and can wrap before the signed comparisons below — presumably the
    subsequent size checks reject such geometry, but confirm against upstream.
  */
  x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color: accentuate/highlight lighten the matte color
    toward white, shadow/trough darken it, to simulate a raised bevel.
  */
  matte=image->matte_color;
  accentuate=matte;
  accentuate.red=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.black=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.black=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
  highlight.alpha=matte.alpha;
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /*
    Top band: outer bevel + top margin + inner bevel, written as one pixel
    region.  The q pointer walks the region row by row; the x/y loops below
    must therefore advance q by exactly GetPixelChannels() per pixel.
  */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /* diagonal split: highlight left of the diagonal, accentuate on
               the top edge, shadow in the right corner */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* flat top margin between the outer and inner bevels */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* inner bevel above the image area */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  Each interior scanline is independent,
    so rows are parallelized; the interior is filled with the border color and
    later overwritten by CompositeImage() below.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,frame_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: inner bevel + bottom margin + outer bevel, mirroring the top
    band.  Note the reuse of `height` as both the band height and, later, the
    flat-margin height.
  */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw bottom of ornamental border.
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the source image into the frame interior.
  */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
      exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R a i s e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RaiseImage() creates a simulated three-dimensional button-like effect
%  by lightening and darkening the edges of the image.  Members width and
%  height of raise_info define the width of the vertical and horizontal
%  edge of the effect.
%
%  The format of the RaiseImage method is:
%
%      MagickBooleanType RaiseImage(const Image *image,
%        const RectangleInfo *raise_info,const MagickBooleanType raise,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o raise_info: Define the width and height of the raise area.
%
%    o raise: A value other than zero creates a 3-D raise effect,
%      otherwise it has a lowered effect.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
#define AccentuateFactor  ScaleCharToQuantum(135)
#define HighlightFactor  ScaleCharToQuantum(190)
#define ShadowFactor  ScaleCharToQuantum(190)
#define RaiseImageTag  "Raise/Image"
#define TroughFactor  ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    Each edge pixel is blended toward foreground (white for a raised look) or
    background (black).  A lowered effect simply swaps the two.
  */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  The image is processed in three horizontal bands (top bevel,
    middle, bottom bevel); within each scanline the x loops partition the row
    into left bevel / interior / right bevel segments.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* top-left diagonal: highlight */
    for (x=0; x < y; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* top edge: accentuate */
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* top-right diagonal: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle band: only the left (highlight) and right (shadow) bevel columns
    are modified; the interior pixels are skipped untouched.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: mirror of the top band, with the trough color on the bottom
    edge instead of accentuate.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_unop__identity_uint16_int16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_int16
// op(A') function:  GB_unop_tran__identity_uint16_int16

// C type:   uint16_t
// A type:   int16_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = (uint16_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the int16_t -> uint16_t cast counts as typecasting, so the
// memcpy fast path below is compiled out and the cast loop is used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint16_int16
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry of Ax is cast and copied to Cx
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it expands the
    // GB_CAST_OP macro defined above for each entry of A'
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__isinf_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__isinf_bool_fp64)
// op(A') function:  GB (_unop_tran__isinf_bool_fp64)

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isinf (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isinf (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (aij) ; \
    Cx [pC] = isinf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__isinf_bool_fp64)
(
    bool *Cx,           // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: evaluate isinf on every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__isinf_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it expands the
    // GB_CAST_OP macro defined above for each entry of A'
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shock_detection_process.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Ruben Zorrilla
//

#ifndef KRATOS_SHOCK_DETECTION_PROCESS
#define KRATOS_SHOCK_DETECTION_PROCESS

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "processes/process.h"
#include "processes/calculate_nodal_area_process.h"
#include "processes/compute_nodal_gradient_process.h"
#include "processes/find_global_nodal_neighbours_process.h"
#include "containers/global_pointers_vector.h"

// Application includes
#include "fluid_dynamics_application_variables.h"

namespace Kratos
{

///@addtogroup FluidDynamicsApplication
///@{

///@name Kratos Classes
///@{

/**
 * @brief Auxiliary neighbour data class
 * Auxiliary class to retrieve the data from the neighbours when communicating the pointers
 * (plain value copies, so the data can be serialized and sent across MPI ranks)
 * @tparam TShockVariableType Shock variable type
 * @tparam TShockGradientVariableType Shock gradient variable type
 */
template<class TShockVariableType, class TShockGradientVariableType>
class NeighbourData
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of NeighbourData
    KRATOS_CLASS_POINTER_DEFINITION(NeighbourData);

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Construct a new Neighbour Data object
     * Default neighbour data container constructor
     * Required to compile the class
     */
    NeighbourData() = default;

    /**
     * @brief Construct a new Neighbour Data object
     * Constructs a new neighbour data container instance
     * @param rShockVariableValue Neighbour shock variable value
     * @param rShockGradientVariableValue Neighbour shock variable gradient value
     * @param rCoordinates Neighbour node coordinates
     */
    NeighbourData(
        const typename TShockVariableType::Type& rShockVariableValue,
        const typename TShockGradientVariableType::Type& rShockGradientVariableValue,
        const array_1d<double, 3>& rCoordinates)
    {
        mCoordinates = rCoordinates;
        mShockVariableValue = rShockVariableValue;
        mShockGradientVariableValue = rShockGradientVariableValue;
    }

    ///@}
    ///@name Member Variables
    ///@{

    // Neighbour node coordinates
    array_1d<double, 3> mCoordinates;

    // Neighbour shock variable value
    typename TShockVariableType::Type mShockVariableValue;

    // Neighbour shock variable gradient value
    typename TShockGradientVariableType::Type mShockGradientVariableValue;

    ///@}
private:
    ///@name Serialization
    ///@{

    friend class Serializer;

    void save(Serializer& rSerializer) const
    {
        rSerializer.save("mCoordinates",mCoordinates);
        rSerializer.save("mShockVariableValue",mShockVariableValue);
        rSerializer.save("mShockGradientVariableValue",mShockGradientVariableValue);
    }

    void load(Serializer& rSerializer)
    {
        rSerializer.load("mCoordinates",mCoordinates);
        rSerializer.load("mShockVariableValue",mShockVariableValue);
        rSerializer.load("mShockGradientVariableValue",mShockGradientVariableValue);
    }

    ///@}
};

/// Main class for shock detection
/** This class implements some utilities for the detection of sharp discontinuities (shocks) in the FE solution
*/
class KRATOS_API(FLUID_DYNAMICS_APPLICATION) ShockDetectionProcess : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ShockDetectionProcess
    KRATOS_CLASS_POINTER_DEFINITION(ShockDetectionProcess);

    /// Variable component type
    typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > VariableComponentType;

    /// Node pointer type
    typedef typename Node<3>::Pointer NodePointerType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    ShockDetectionProcess() = default;

    /// Constructor with default shock sensor variable (SHOCK_SENSOR) for double shock variable
    ShockDetectionProcess(
        ModelPart& rModelPart,
        const Variable<double>& rShockDoubleVariable,
        const Variable<array_1d<double,3>>& rShockGradientVariable,
        const bool UpdateNodalAreaAtEachStep = false,
        const bool UpdateNodalNeighboursAtEachStep = false)
        : Process()
        , mrModelPart(rModelPart)
        , mUpdateNodalAreaAtEachStep(UpdateNodalAreaAtEachStep)
        , mUpdateNodalNeighboursAtEachStep(UpdateNodalNeighboursAtEachStep)
        , mShockVariableIsDouble(true)
        , mpShockDoubleVariable(&rShockDoubleVariable)
        , mpShockGradientVariable(&rShockGradientVariable)
        , mpShockSensorVariable(&SHOCK_SENSOR)
    {}

    /// Constructor with default shock sensor variable (SHOCK_SENSOR) for component shock variable
    ShockDetectionProcess(
        ModelPart& rModelPart,
        const VariableComponentType& rShockComponentVariable,
        const Variable<array_1d<double,3>>& rShockGradientVariable,
        const bool UpdateNodalAreaAtEachStep = false,
        const bool UpdateNodalNeighboursAtEachStep = false)
        : Process()
        , mrModelPart(rModelPart)
        , mUpdateNodalAreaAtEachStep(UpdateNodalAreaAtEachStep)
        , mUpdateNodalNeighboursAtEachStep(UpdateNodalNeighboursAtEachStep)
        , mShockVariableIsDouble(false)
        , mpShockComponentVariable(&rShockComponentVariable)
        , mpShockGradientVariable(&rShockGradientVariable)
        , mpShockSensorVariable(&SHOCK_SENSOR)
    {}

    /// Constructor with custom shock sensor variable for double shock variable
    ShockDetectionProcess(
        ModelPart& rModelPart,
        const Variable<double>& rShockDoubleVariable,
        const Variable<array_1d<double,3>>& rShockGradientVariable,
        const Variable<double>& rShockSensorVariable,
        const bool UpdateNodalAreaAtEachStep = false,
        const bool UpdateNodalNeighboursAtEachStep = false)
        : Process()
        , mrModelPart(rModelPart)
        , mUpdateNodalAreaAtEachStep(UpdateNodalAreaAtEachStep)
        , mUpdateNodalNeighboursAtEachStep(UpdateNodalNeighboursAtEachStep)
        , mShockVariableIsDouble(true)
        , mpShockDoubleVariable(&rShockDoubleVariable)
        , mpShockGradientVariable(&rShockGradientVariable)
        , mpShockSensorVariable(&rShockSensorVariable)
    {}

    /// Constructor with custom shock sensor variable for component shock variable
    ShockDetectionProcess(
        ModelPart& rModelPart,
        const VariableComponentType& rShockComponentVariable,
        const Variable<array_1d<double,3>>& rShockGradientVariable,
        const Variable<double>& rShockSensorVariable,
        const bool UpdateNodalAreaAtEachStep = false,
        const bool UpdateNodalNeighboursAtEachStep = false)
        : Process()
        , mrModelPart(rModelPart)
        , mUpdateNodalAreaAtEachStep(UpdateNodalAreaAtEachStep)
        , mUpdateNodalNeighboursAtEachStep(UpdateNodalNeighboursAtEachStep)
        , mShockVariableIsDouble(false)
        , mpShockComponentVariable(&rShockComponentVariable)
        , mpShockGradientVariable(&rShockGradientVariable)
        , mpShockSensorVariable(&rShockSensorVariable)
    {}

    /// Destructor.
    virtual ~ShockDetectionProcess() = default;

    /// Assignment operator.
    ShockDetectionProcess &operator=(ShockDetectionProcess const &rOther) = delete;

    /// Copy constructor.
    ShockDetectionProcess(ShockDetectionProcess const &rOther) = delete;

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Initializes the values for the shock detection
     * This method initializes the nodal mass, that is required for the nodal gradients
     * calculation, and the nodal neighbours.
     * It has to be executed once (in case there is no mesh deformation nor topology changes)
     */
    void ExecuteInitialize() override;

    /**
     * @brief Calculates the edge based shock detection
     * This method performs the edge based shock detection
     */
    void ExecuteInitializeSolutionStep() override;

    /**
     * @brief This method performs all the operations
     * This method performs all the operations that are required for the shock detection
     */
    void Execute() override;

    /**
     * @brief Perform edge based shock detection
     * This method performs the edge based shock detection
     * @param rShockVariable Double variable to perform the shock detection
     * @param rShockGradientVariable Vector variable to calculate the shock variable gradients
     */
    void EdgeBasedShockDetection(
        const Variable<double>& rShockVariable,
        const Variable<array_1d<double, 3>>& rShockGradientVariable);

    /**
     * @brief Perform edge based shock detection
     * This method performs the edge based shock detection
     * @param rShockVariable Component variable to perform the shock detection
     * @param rShockGradientVariable Vector variable to calculate the shock variable gradients
     */
    void EdgeBasedShockDetection(
        const VariableComponentType& rShockVariable,
        const Variable<array_1d<double, 3>>& rShockGradientVariable);

    /**
     * @brief Template specialization of the edge based shock detection function
     * Auxiliary method to specialize the variable types for the edge based shock detection.
     * For each local node the sensor beta_ij is computed against every neighbour j and the
     * maximum over the neighbours is stored in the shock sensor variable (non-historical).
     * @tparam TShockVariableType Shock variable type
     * @tparam TShockGradientVariableType Shock gradient variable type
     * @param rShockVariable Component variable to perform the shock detection
     * @param rShockGradientVariable Vector variable to calculate the shock variable gradients
     */
    template<class TShockVariableType, class TShockGradientVariableType>
    void EdgeBasedShockDetectionSpecialization(
        const TShockVariableType& rShockVariable,
        const TShockGradientVariableType& rShockGradientVariable)
    {
        // If required recompute the NODAL_AREA
        // This is required for the nodal gradients calculation
        if (mUpdateNodalAreaAtEachStep) {
            CalculateNodalAreaProcess<CalculateNodalAreaSettings::SaveAsNonHistoricalVariable>(
                mrModelPart,
                mrModelPart.GetProcessInfo().GetValue(DOMAIN_SIZE)).Execute();
        }

        // If required recompute the NODAL_NEIGHBOURS
        if (mUpdateNodalNeighboursAtEachStep) {
            const auto& r_data_communicator = mrModelPart.GetCommunicator().GetDataCommunicator();
            FindGlobalNodalNeighboursProcess(r_data_communicator, mrModelPart).Execute();
        }

        // Calculate the shock variable nodal gradients
        ComputeNodalGradientProcess<ComputeNodalGradientProcessSettings::SaveAsNonHistoricalVariable>(
            mrModelPart,
            rShockVariable,
            rShockGradientVariable).Execute();

        auto& r_comm = mrModelPart.GetCommunicator();
        auto& r_data_comm = r_comm.GetDataCommunicator();

        // Create the global pointers list
        // Only needed for the distributed (MPI) case, in which neighbour values
        // may live on other ranks; in serial the list stays empty
        GlobalPointersVector<Node<3>> global_pointers_list;
        if (r_comm.IsDistributed()) {
            for (auto &r_node : r_comm.LocalMesh().Nodes()) {
                auto& r_gp_to_neighbours = r_node.GetValue(NEIGHBOUR_NODES).GetContainer();
                for (auto &r_gp : r_gp_to_neighbours) {
                    global_pointers_list.push_back(r_gp);
                }
            }
            // remove duplicated neighbour pointers
            global_pointers_list.Unique();
        }

        // Now create the pointer communicator and shock values retrieve proxy
        // The proxy packages (value, gradient, coordinates) per neighbour node
        GlobalPointerCommunicator<Node<3>> pointer_communicator(r_data_comm, global_pointers_list);
        auto shock_variables_proxy = pointer_communicator.Apply([&](const GlobalPointer<Node<3>>& rpNode) {
            NeighbourData<TShockVariableType, TShockGradientVariableType> neighbour_data(
                rpNode->FastGetSolutionStepValue(rShockVariable),
                rpNode->GetValue(rShockGradientVariable),
                rpNode->Coordinates());
            return neighbour_data;
        });

        // Perform the shock detection
        #pragma omp parallel for
        for (int i_node = 0; i_node < static_cast<int>(r_comm.LocalMesh().NumberOfNodes()); ++i_node) {
            auto it_node = r_comm.LocalMesh().NodesBegin() + i_node;
            double& r_shock_sens = it_node->GetValue(*mpShockSensorVariable);
            const auto& r_var_i = it_node->FastGetSolutionStepValue(rShockVariable);
            const auto& r_grad_var_i = it_node->GetValue(rShockGradientVariable);

            // Loop the neighbours to compute the shock sensor
            r_shock_sens = 0.0;
            const double zero_tol = 1.0e-8;
            auto& r_neighbours = it_node->GetValue(NEIGHBOUR_NODES);
            // NOTE(review): the message reports the local loop index, not the node Id — verify intent
            KRATOS_DEBUG_ERROR_IF(r_neighbours.size() == 0) << "Node " << i_node << " has no neighbours." << std::endl;
            for (auto& r_neigh : r_neighbours ) {
                // Get the neighbour values
                const auto values_j = shock_variables_proxy.Get(&r_neigh);
                const double& r_var_j = values_j.mShockVariableValue;
                const auto& r_grad_var_j = values_j.mShockGradientVariableValue;
                // edge vector from node i to neighbour j
                const auto l_ji = values_j.mCoordinates - it_node->Coordinates();

                // Calculate the shock sensor auxiliary values
                // aux_1: jump of the shock variable along the edge
                // aux_2: averaged-gradient estimate of that jump
                const auto aux_1 = r_var_j - r_var_i;
                const auto aux_2 = 0.5 * inner_prod(l_ji, r_grad_var_i + r_grad_var_j);
                const auto num = aux_1 - aux_2;
                const auto den = std::abs(aux_1) + std::abs(aux_2);

                // Check if the solution is not constant (den close to 0.0)
                double beta_ij = 0.0;
                if (std::abs(den) > zero_tol) {
                    // Compute and bound the shock sensor to [0, 1]
                    const double aux_beta_ij = std::abs(num / den);
                    beta_ij = aux_beta_ij < 1.0 ? aux_beta_ij : 1.0;
                }
                // Check against the current value of shock sensor and keep the largest one
                if (r_shock_sens < beta_ij) {
                    r_shock_sens = beta_ij;
                }
            }
        }
    }

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const override;

    /// Print information about this object.
    virtual void PrintInfo(std::ostream &rOStream) const override;

    /// Print object's data.
virtual void PrintData(std::ostream &rOStream) const override; ///@} private: ///@name Member Variables ///@{ /// Reference to the model part in where the shock detection is to be performed ModelPart& mrModelPart; /// Updates the NODAL_AREA at each time step (required in case the mesh deforms) const bool mUpdateNodalAreaAtEachStep = false; /// Updates the NODAL_NEIGHBOURS at each time step (required in case topology changes) const bool mUpdateNodalNeighboursAtEachStep = false; /// Flag to indicate if the nodal area has been already computed bool mNodalAreaAlreadyComputed = false; /// Flag to indicate if the nodal neighbours have been already computed bool mNodalNeighboursAlreadyComputed = false; /// Flag to indicate if the shock variable type is double or component one const bool mShockVariableIsDouble; /// Pointer to the shock detection double variable const Variable<double>* mpShockDoubleVariable = nullptr; /// Pointer to the shock detection component variable const VariableComponentType* mpShockComponentVariable = nullptr; /// Name of the shock detection gradient variable const Variable<array_1d<double,3>>* mpShockGradientVariable = nullptr; /// Name of the shock sensor variable const Variable<double>* mpShockSensorVariable = nullptr; ///@} ///@name Serialization ///@{ ///@} }; // Class ShockDetectionProcess ///@} ///@name Input and output ///@{ /// input stream function inline std::istream &operator>>( std::istream &rIStream, ShockDetectionProcess &rThis) { return rIStream; } /// output stream function inline std::ostream &operator<<( std::ostream &rOStream, const ShockDetectionProcess &rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_SHOCK_DETECTION_PROCESS defined
MergePreparator.h
//
// Created by kilian on 10/03/17.
//

#ifndef STERMPARSER_MERGEPREPARATOR_H
#define STERMPARSER_MERGEPREPARATOR_H

#include <memory>
#include "GrammarInfo.h"
#include "LatentAnnotation.h"
#include "TrainingCommon.h"
#include <numeric>
#include <omp.h>

namespace Trainer {
    // Maps the collected merge-Δ statistics to a merge threshold (log-space).
    typedef std::function<double(const::std::vector<double>&)> ThresholdFunction;

    /**
     * Abstract base class of the merge preparators. Subclasses decide which
     * latent-annotation splits get merged; this base turns that decision
     * (per-split merge-Δs plus a threshold) into a MergeInfo.
     */
    class MergePreparator {
    protected:
        std::shared_ptr<const GrammarInfo2> grammarInfo;
        const bool debug;

        /**
         * Builds MergeInfo according to merge-Δs and threshold.
         * If (merge-Δ > 1 or the split of a start symbol is concerned)
         * then the split is always merged.
         */
        MergeInfo build_merge_info(
                const std::vector<std::vector<double>> &&merge_factors
                , const double merge_threshold
                , const std::vector<std::vector<double>> &&merge_delta
                , const std::vector<size_t> &nontSplits
        ) {
            std::vector<std::vector<std::vector<size_t>>> mergeSelection;
            std::vector<size_t> nontSplitsAfterMerge;
            unsigned nont = 0;
            unsigned merges = 0;   // number of split pairs that get merged
            unsigned splits = 0;   // number of split pairs that are kept apart
            if (debug) std::cerr << "merge deltas: ";
            for (const auto &delta : merge_delta) {
                if (debug) std::cerr << " { ";
                mergeSelection.push_back(std::vector<std::vector<size_t >>());
                const size_t halfSplits = nontSplits[nont] / 2;
                for (size_t split = 0; split < halfSplits; ++split) {
                    if (debug) std::cerr << delta[split] << " ";
                    // merge if Δ >= merge_threshold * 0.999, i.e. log(Δ) >= log(θ) + log(0.999) (logarithmic)
                    if (delta[split] >= merge_threshold + std::log(0.999)
                        // always merge if Δ >= 1, i.e. log(Δ) >= 0 + log(0.999)
                        || delta[split] >= std::log(0.999)
                        // always merge initial symbol
                        || grammarInfo->start == nont) {
                        // split and its sibling (split + halfSplits) become one group
                        mergeSelection.back().emplace_back();
                        mergeSelection.back().back().push_back(split);
                        mergeSelection.back().back().push_back(split + halfSplits);
                        ++merges;
                    } else {
                        // keep both halves as separate singleton groups
                        mergeSelection.back().emplace_back(1, split);
                        mergeSelection.back().emplace_back(1, split + halfSplits);
                        ++splits;
                    }
                }
                if (debug) std::cerr << " } ";
                ++nont;
                nontSplitsAfterMerge.push_back(mergeSelection.back().size());
            }
            if (debug) std::cerr << std::endl;

            std::cerr << "Merging " << merges << " of " << merges + splits << " splits. Merge threshold is "
                      << merge_threshold << std::endl;

            return MergeInfo(std::move(mergeSelection), std::move(nontSplitsAfterMerge), std::move(merge_factors));
        }

    public:
        MergePreparator(std::shared_ptr<const GrammarInfo2> grammarInfo, bool debug = false)
                : grammarInfo(grammarInfo), debug(debug) {}

        // Computes which splits of the given latent annotation are merged.
        virtual MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) = 0;

        // Optional hook; only used by subclasses that work with a ThresholdFunction.
        virtual void setMergeThresholdFunction(ThresholdFunction /*thresholdFunction*/) {};
    };

    /**
     * Merges none of the splits, except for start symbol whose splits are always merged.
     */
    class MergeNothingMergePreparator : public MergePreparator {
    public:
        MergeNothingMergePreparator(std::shared_ptr<const GrammarInfo2> grammarInfo, bool debug = false)
                : MergePreparator(grammarInfo, debug) {};

        MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) {
            std::vector<std::vector<double>> mergeFactors;
            std::vector<std::vector<double>> mergeDelta;
            for (auto splits : latentAnnotation.nonterminalSplits) {
                mergeFactors.emplace_back(splits, 0.5);
                // log(0.4) < log(0.5) = threshold, so no split passes the threshold test;
                // only the start symbol is merged (forced in build_merge_info)
                mergeDelta.emplace_back(splits / 2, std::log(0.4));
            }
            double merge_threshold = std::log(0.5);

            return build_merge_info(
                    std::move(mergeFactors)
                    , merge_threshold
                    , std::move(mergeDelta)
                    , latentAnnotation.nonterminalSplits
            );
        }
    };

    /**
     * Common infrastructure for data-driven merge preparators: computes expected
     * nonterminal frequencies, merge factors, and merge-Δs from the traces.
     * Subclasses only choose the threshold (computeMergeThreshold).
     */
    template<typename Nonterminal, typename TraceID>
    class DefaultMergePreparator : public MergePreparator {
    protected:
        using TraceIterator = ConstManagerIterator<Trace < Nonterminal, TraceID>>;
        const TraceManagerPtr <Nonterminal, EdgeLabelT> traceManager;
        std::shared_ptr<StorageManager> storageManager;
        const unsigned threads;

        // Per-trace caches of inside/outside weights, reused by computeMergeDeltas.
        std::vector<MAPTYPE<Element<Node<Nonterminal>>, WeightVector>> tracesInsideWeights;
        std::vector<MAPTYPE<Element<Node<Nonterminal>>, WeightVector>> tracesOutsideWeights;

    public:
        DefaultMergePreparator(
                TraceManagerPtr <Nonterminal, EdgeLabelT> traceManager
                , std::shared_ptr<StorageManager> storageManager
                , std::shared_ptr<const GrammarInfo2> grammarInfo
                , unsigned threads = 1
                , bool debug = false
        ) : MergePreparator(grammarInfo, debug), traceManager(traceManager), storageManager(storageManager),
            threads(threads) {}

        virtual MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) {
            // setup temporary data structures
            if (tracesInsideWeights.size() < traceManager->size())
                tracesInsideWeights.resize(traceManager->size());
            if (tracesOutsideWeights.size() < traceManager->size())
                tracesOutsideWeights.resize(traceManager->size());

            std::vector<WeightVector> nonterminalFrequencies{estimateNontFreqLA(latentAnnotation)};
            std::vector<std::vector<double>> mergeFactors{computeMergeFactors(nonterminalFrequencies)};

            // start every Δ at log(1) = 0; computeMergeDeltas accumulates onto it
            std::vector<std::vector<double>> mergeDelta;
            for (auto split : latentAnnotation.nonterminalSplits) {
                mergeDelta.emplace_back(split / 2, std::log(1.0));
            }
            computeMergeDeltas(
                    mergeFactors
                    , latentAnnotation.nonterminalSplits
                    , mergeDelta
            );

            // subclass decides the threshold from the collected Δs
            const double merge_threshold = computeMergeThreshold(mergeDelta);

            // clean up
            storageManager->free_weight_maps(tracesInsideWeights);
            storageManager->free_weight_maps(tracesOutsideWeights);
            for (WeightVector &weightVector : nonterminalFrequencies) {
                storageManager->free_weight_vector(weightVector);
            }
            nonterminalFrequencies.clear();

            return build_merge_info(
                    std::move(mergeFactors)
                    , merge_threshold
                    , std::move(mergeDelta)
                    , latentAnnotation.nonterminalSplits
            );
        }

    protected:
        /**
         * What this function computes corresponds to the mergeWeights of the Berkeley parser.
         * @param latentAnnotation the current latent annotation
         * @return per-nonterminal expected frequencies of each latent split
         */
        inline std::vector<WeightVector> estimateNontFreqLA(const LatentAnnotation &latentAnnotation) {
            // Accumulator type for the OpenMP reduction below; the copy constructor
            // allocates fresh weight vectors so each thread owns its storage.
            struct NontFreq {
                std::shared_ptr<StorageManager> storageManager;
                std::vector<WeightVector> nonterminalFrequencies;

                NontFreq(
                        std::shared_ptr<StorageManager> storageManager
                        , std::vector<WeightVector> &&nonterminalFrequencies
                ) : storageManager(storageManager), nonterminalFrequencies(nonterminalFrequencies) {};

                NontFreq(const NontFreq &other) : storageManager(other.storageManager) {
                    for (const WeightVector &vector : other.nonterminalFrequencies) {
                        nonterminalFrequencies.push_back(storageManager->create_weight_vector<WeightVector>(vector.size()));
                        nonterminalFrequencies.back() = vector;
                    }
                }

                NontFreq &operator+=(const NontFreq &other) {
                    std::transform(
                            other.nonterminalFrequencies.cbegin()
                            , other.nonterminalFrequencies.cend()
                            , nonterminalFrequencies.begin()
                            , nonterminalFrequencies.begin()
                            , [](const WeightVector &x, const WeightVector &y) { return x + y; }
                    );
                    return *this;
                }
            };

            NontFreq nonterminalFrequencies(storageManager, initialize_nonterminal_frequencies(latentAnnotation));

            // computing in(A_x) * out(A_x) for every A ∈ N and x ∈ X_A
#ifdef _OPENMP
            omp_set_num_threads(threads);
#endif
#pragma omp declare reduction(+ : NontFreq : omp_out += omp_in) initializer (omp_priv = omp_orig)
#pragma omp parallel for schedule(dynamic, 10) reduction(+:nonterminalFrequencies)
            for (TraceIterator traceIterator = traceManager->cbegin();
                 traceIterator < traceManager->cend(); ++traceIterator) {
                // (re)allocate the cached weight maps if the hypergraph size changed
                if (tracesInsideWeights[traceIterator - traceManager->cbegin()].size() !=
                    traceIterator->get_hypergraph()->size()
                    or tracesOutsideWeights[traceIterator - traceManager->cbegin()].size() !=
                       traceIterator->get_hypergraph()->size()) {
                    tracesInsideWeights[traceIterator - traceManager->cbegin()].clear();
                    tracesOutsideWeights[traceIterator - traceManager->cbegin()].clear();
                    for (const auto &node : *(traceIterator->get_hypergraph())) {
                        tracesInsideWeights[traceIterator - traceManager->cbegin()].emplace(
                                node
                                , storageManager->create_weight_vector<WeightVector>(
                                        latentAnnotation.nonterminalSplits[node->get_label_id()]));
                        tracesOutsideWeights[traceIterator - traceManager->cbegin()].emplace(
                                node
                                , storageManager->create_weight_vector<WeightVector>(
                                        latentAnnotation.nonterminalSplits[node->get_label_id()]));
                    }
                }
                traceIterator->io_weights_la(
                        latentAnnotation
                        , tracesInsideWeights[traceIterator - traceManager->cbegin()]
                        , tracesOutsideWeights[traceIterator - traceManager->cbegin()]
                        , true
                );

                const auto &insideWeights = tracesInsideWeights[traceIterator - traceManager->cbegin()];
                const auto &outsideWeights = tracesOutsideWeights[traceIterator - traceManager->cbegin()];

                for (const Element<Node<Nonterminal>> &node : *(traceIterator->get_hypergraph())) {
                    const auto &insideWeight = insideWeights.at(node);
                    const auto &outsideWeight = outsideWeights.at(node);

                    // normalize in*out over the splits of this node
                    const auto vals = insideWeight * outsideWeight;
                    Eigen::Tensor<double, 0> denominator = vals.sum();
                    Eigen::Tensor<double, 1> fraction
                            = vals.unaryExpr([denominator](double x) { return x / denominator(0); });
                    Eigen::Tensor<bool, 0> nan = fraction.isnan().any();
                    Eigen::Tensor<bool, 0> inf = fraction.isinf().any();
                    // skip degenerate nodes (e.g. denominator 0) instead of poisoning the sum
                    if (not nan(0) and not inf(0)) {
                        auto &target = nonterminalFrequencies.nonterminalFrequencies[node->get_label_id()];
                        target += fraction * traceIterator->get_frequency();
                    }
                }
            }

            return nonterminalFrequencies.nonterminalFrequencies;
        }

        // Allocates one zero-initialized weight vector per nonterminal.
        inline std::vector<WeightVector> initialize_nonterminal_frequencies(const LatentAnnotation &latentAnnotation) {
            std::vector<WeightVector> nonterminalFrequencies;
            for (size_t nont = 0; nont < latentAnnotation.nonterminalSplits.size(); ++nont) {
                WeightVector mw
                        = storageManager->create_weight_vector<WeightVector>(latentAnnotation.nonterminalSplits[nont]);
                mw.setZero();
                nonterminalFrequencies.push_back(mw);
            }
            return nonterminalFrequencies;
        }

        /**
         * @param nontFreqLA (== mergeWeight in Berkeley parser)
         * @return the p from the Berkeley parser
         */
        inline std::vector<std::vector<double>> computeMergeFactors(const std::vector<WeightVector> &nontFreqLA) {
            std::cerr << "Computing merge factors." << std::endl;
            std::vector<std::vector<double>> p;
            // NOTE(review): iterating by value copies each WeightVector; `const auto &`
            // would avoid the copies — confirm WeightVector copy cost before changing
            for (auto las_weights : nontFreqLA) {
                p.emplace_back(std::vector<double>(las_weights.dimension(0)));
                const long int half_splits{las_weights.dimension(0) / 2};
                for (unsigned i = 0; i < half_splits; ++i) {
                    double combined_weight = las_weights(i) + las_weights(i + half_splits);
                    if ((not std::isnan(combined_weight)) and combined_weight > 0) {
                        p.back()[i] = las_weights(i) / combined_weight;
                        p.back()[i + half_splits] = las_weights(i + half_splits) / combined_weight;
                    } else {
                        // undefined frequency ratio: fall back to an even split
                        p.back()[i] = 0.5;
                        p.back()[i + half_splits] = 0.5;
                    }
                }
            }
            return p;
        }

        /**
         * Compute merge-Δ for each split. This is an approximation of likelihood after merge
         * divided by likelihood before merge.
         * Splits with high merge-Δ should be merged, splits with low merge-Δ should be kept.
         */
        inline void computeMergeDeltas(
                const std::vector<std::vector<double>> &p
                , const std::vector<size_t> &nontDimensions
                , std::vector<std::vector<double>> &mergeDelta
        ) const {
            // prefix and postfix sums are used for efficient computation of
            // s(i) = sum_{j ∈ {0, …, i-1, i+1, …, n-1}} a_j
            // for each i ∈ {0, …, n-1}
            std::vector<double> prefixSums;
            std::vector<double> postfixSums;

            for (TraceIterator trace_id = traceManager->cbegin(); trace_id < traceManager->cend(); ++trace_id) {
                // reuse the inside/outside weights cached by estimateNontFreqLA
                const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &insideWeights
                        = tracesInsideWeights[trace_id - traceManager->cbegin()];
                const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &outsideWeights
                        = tracesOutsideWeights[trace_id - traceManager->cbegin()];

                for (const Element<Node<Nonterminal>> &node : *(trace_id->get_hypergraph())) {
                    const size_t nont = node->get_label_id();
                    const size_t nontDim = nontDimensions[nont];
                    const size_t halfDim = nontDim / 2;

                    const auto &insideWeight = insideWeights.at(node);
                    const auto &outsideWeight = outsideWeights.at(node);

                    prefixSums.resize(halfDim, 0.0);
                    postfixSums.resize(halfDim, 0.0);
                    double denominator = 0;
                    // contribution of the last split pair (not covered by the prefix loop)
                    {
                        const size_t idx = halfDim - 1;
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        denominator += in1 * out1 + in2 * out2;
                    }
                    for (size_t idx = 0; idx < halfDim - 1; ++idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        prefixSums[idx + 1] = prefixSums[idx] + in1 * out1 + in2 * out2;
                        denominator += in1 * out1 + in2 * out2;
                    }
                    for (size_t idx = halfDim - 1; idx > 0; --idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        postfixSums[idx - 1] = postfixSums[idx] + in1 * out1 + in2 * out2;
                    }

                    // inside weight of some nodes can be zero in certain LA-dimensions
                    // since LA-rule weights may converge to zero
                    // we ignore those dimensions in Δ computation
                    if (denominator == 0) continue;

                    for (unsigned idx = 0; idx < halfDim; ++idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);

                        const double p1 = p[nont][idx];
                        const double p2 = p[nont][idx + halfDim];

                        // in/out weights of the hypothetical merged annotation
                        const double inMerged = (p1 * in1) + (p2 * in2);
                        const double outMerged = out1 + out2;

                        // ratio of (likelihood with idx merged) to (current likelihood) at this node
                        const double Q = (prefixSums[idx] + postfixSums[idx] + inMerged * outMerged) / denominator;
                        if (std::isnan(Q)) {
                            std::cerr << "bad fraction " << Q << " where" << std::endl;
                            std::cerr << "prefix " << prefixSums[idx] << std::endl;
                            std::cerr << "postfix " << postfixSums[idx] << std::endl;
                            std::cerr << "merged " << inMerged * outMerged << std::endl;
                            std::cerr << "denom " << denominator << std::endl;
                            assert(!std::isnan(Q));
                        }

                        double &delta = mergeDelta[nont][idx];
                        delta += std::log(Q);
                    }

                    prefixSums.clear();
                    postfixSums.clear();
                }
            }
        }

        // Subclass hook: derive the merge threshold from the collected merge-Δs.
        virtual double computeMergeThreshold(const std::vector<std::vector<double>> &mergeDelta) = 0;
    };

    /**
     * Merge all splits, where merge-Δ is above given threshold.
*/ template<typename Nonterminal, typename TraceID> class ThresholdMergePreparator : public DefaultMergePreparator<Nonterminal, TraceID> { const double merge_threshold; public: ThresholdMergePreparator( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , double merge_threshold , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID>( traceManager , storageManager , grammarInfo , threads , debug ), merge_threshold(merge_threshold) {} protected: double computeMergeThreshold(const std::vector<std::vector<double>> &merge_delta) { std::cerr << "Selecting merges "; std::cerr << "above threshold " << merge_threshold; std::cerr << std::endl; return merge_threshold; } }; /** * Merges the first mergePercent % of splits ordered by merge-Δ in descending order. */ template<typename Nonterminal, typename TraceID> class PercentMergePreparator : public DefaultMergePreparator<Nonterminal, TraceID> { const double mergePercent; public: PercentMergePreparator( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , double mergePercent , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID>(traceManager, storageManager, grammarInfo, threads, debug), mergePercent(mergePercent) {} protected: double computeMergeThreshold(const std::vector<std::vector<double>> &mergeDelta) { std::cerr << "Selecting merges "; std::cerr << "best " << mergePercent << " % "; std::cerr << std::endl; std::vector<double> orderedMergeDeltas; // order merges according to likelihood_loss for (const auto &delta : mergeDelta) { orderedMergeDeltas.insert( std::end(orderedMergeDeltas) , std::begin(delta) , std::end(delta)); } std::sort(std::begin(orderedMergeDeltas), std::end(orderedMergeDeltas), std::greater<double>()); std::cerr << "ordered merge Δs: 
"; for (auto weight : orderedMergeDeltas) std::cerr << weight << " "; std::cerr << std::endl; // todo: option to skip over merge_weights >= 1 size_t index = (size_t) (mergePercent / 100.0 * orderedMergeDeltas.size()); if (index > orderedMergeDeltas.size()) index = orderedMergeDeltas.size() - 1; std::cerr << "index for ordered merges " << index << " / " << orderedMergeDeltas.size() << std::endl; return orderedMergeDeltas[index]; } }; /** * Merges nonterminals according to the principle stated in www.aclweb.org/anthology/E14-1015 * * Merge-Δs are computed for each pair {i,j} of latent annotations of some nonterminal. * Then a fully connected, undirected graph G with latent annotations as nodes is constructed. * Each edge {i,j} is weighted by w=Δ({i,j}) and edges with w <= threshold are removed. * The (strongly) connected components of G are the new latent annotations. * Merge weights are chosen proportional to the expected frequency of the annotations. * * @tparam Nonterminal * @tparam TraceID */ template <typename Nonterminal, typename TraceID> class SCCMerger : public DefaultMergePreparator<Nonterminal, TraceID> { std::vector<size_t> relevantNonterminals; ThresholdFunction thresholdFunction; public: SCCMerger( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , std::vector<size_t> relevantNonterminals , ThresholdFunction thresholdFunction , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID> ( traceManager , storageManager , grammarInfo , threads , debug ), relevantNonterminals(relevantNonterminals), thresholdFunction(thresholdFunction) {}; MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) { // setup temporary data structures if (this->tracesInsideWeights.size() < this->traceManager->size()) this->tracesInsideWeights.resize(this->traceManager->size()); if (this->tracesOutsideWeights.size() < 
this->traceManager->size()) this->tracesOutsideWeights.resize(this->traceManager->size()); std::vector<WeightVector> nonterminalFrequencies{this->estimateNontFreqLA(latentAnnotation)}; // computing Δ per nont and pair of LAs j and i (where j > i) std::vector<std::vector<std::vector<double>>> merge_delta; computePairwiseMergeDeltas(nonterminalFrequencies, latentAnnotation.nonterminalSplits, merge_delta); auto stats = mergeWeightStatistics(merge_delta); const double merge_threshold = thresholdFunction(stats); std::cerr << "SCC merging with threshold: " << merge_threshold << std::endl; // ingredients for the MergeInfo std::vector<std::vector<std::vector<size_t>>> mergeSources; std::vector<size_t> nontSplitsAfterMerge; std::vector<std::vector<double>> mergeFactors; for (size_t nont = 0; nont < latentAnnotation.nonterminalSplits.size(); ++nont) { // check if nont ∈ relevantNonterminals bool relevant = false; for (size_t nont2 : relevantNonterminals) { if (nont2 == nont) relevant = true; if (nont2 >= nont) break; } if (relevant) { // lazily build graph by pairwise connecting all LAs of nont (implicit) // we only add an edge to the representation, if it is not removed in the next step // the graph is represented by two maps encoding maximal SCCs, // satisfying // 1. j ∈ edges[i] if i < j and (i,j) are connected in graph // 2. 
inSCC[i] = i or i ∈ edges[inSCC[i]] MAPTYPE<size_t, std::vector<size_t >> edges; MAPTYPE<size_t, size_t> inSCC; // determine weight Δ for each edge (i,j) in graph and remove edge if Δ <= threshold // i.e., we add i and j to the same SCC if Δ > threshold for (size_t i = 0; i < latentAnnotation.nonterminalSplits[nont]; ++i) { for (size_t j = i + 1; j < latentAnnotation.nonterminalSplits[nont]; ++j) { if (merge_delta[nont][j][i] > merge_threshold) { if (not(inSCC.count(i) or inSCC.count(j))) { edges[i].push_back(j); inSCC[i] = i; inSCC[j] = i; } else if (not inSCC.count(j)) { inSCC[j] = inSCC[i]; edges[inSCC[i]].push_back(j); } else if (not inSCC.count(i)) { inSCC[i] = inSCC[j]; edges[inSCC[j]].push_back(i); } else { if (inSCC[i] == inSCC[j]) { // nothing needs to be done! } else if (inSCC[i] < inSCC[j]) { const size_t old_scc_j = inSCC[j]; for (size_t k : edges[old_scc_j]) { edges[inSCC[i]].push_back(k); inSCC[k] = inSCC[i]; } edges[inSCC[i]].push_back(old_scc_j); inSCC[old_scc_j] = inSCC[i]; edges.erase(old_scc_j); } else { const size_t old_scc_i = inSCC[i]; for (size_t k : edges[old_scc_i]) { edges[inSCC[j]].push_back(k); inSCC[k] = inSCC[j]; } edges[inSCC[j]].push_back(old_scc_i); inSCC[old_scc_i] = inSCC[j]; edges.erase(old_scc_i); } } } } } // new LAs = maximal SCCs and // set mergeFactor proportional to nontFreq std::vector<std::vector<size_t>> mergeLists; std::vector<double> laMergeFactors(latentAnnotation.nonterminalSplits[nont]); size_t merged_splits = 0; for (auto key_value_pair : edges) { if (inSCC[key_value_pair.first] != key_value_pair.first) continue; mergeLists.push_back(key_value_pair.second); mergeLists.back().push_back(key_value_pair.first); std::sort(mergeLists.back().begin(), mergeLists.back().end()); merged_splits += mergeLists.back().size(); double normalizer = 0.0; for (auto la : mergeLists.back()) normalizer += nonterminalFrequencies[nont](la); if (normalizer > 0 and not std::isnan(normalizer) and not std::isnan(normalizer)) for (auto la : 
mergeLists.back()) { /* for debugging if (nont == 179) std::cerr << nont << " la: " << la << " freq: " << nonterminalFrequencies[nont](la) << " n: " << normalizer << std::endl; */ laMergeFactors[la] = nonterminalFrequencies[nont](la) / normalizer; } else for (auto la : mergeLists.back()) { laMergeFactors[la] = 1 / mergeLists.back().size(); } } // add all singletons for (size_t la = 0; la < latentAnnotation.nonterminalSplits[nont]; ++la) { if (not inSCC.count(la)) { mergeLists.emplace_back(1, la); laMergeFactors[la] = 1.0; ++merged_splits; } } /*// for debugging for (size_t i = 0; i < mergeLists.size(); ++i) { std::cerr << nont << ": " << i << " [ "; for (auto elem : mergeLists[i]) std::cerr << elem << ", "; std::cerr << "]" << std::endl; } */ if (merged_splits != latentAnnotation.nonterminalSplits[nont]) { for (size_t la = 0; la < latentAnnotation.nonterminalSplits[nont]; ++la) { if (inSCC.count(la)) std::cerr << nont << "-" << la << " is in SCC " << inSCC[la] << std::endl; else std::cerr << nont << "-" << la << " is not in any SCC" << std::endl; if (edges.count(la)) { std::cerr << nont << "-" << la << " has edges to "; for (auto e : edges[la]) std::cerr << e << " "; std::cerr << std::endl; } else std::cerr << nont << "-" << la << " has no edges" << std::endl; } abort(); } nontSplitsAfterMerge.push_back(mergeLists.size()); mergeSources.push_back(mergeLists); mergeFactors.push_back(laMergeFactors); // if nont not in relevant items } else { size_t n = latentAnnotation.nonterminalSplits.at(nont); nontSplitsAfterMerge.push_back(n); mergeFactors.emplace_back(n, 1.0); std::vector<std::vector<size_t>> mergeLists; for (size_t la = 0; la < n; ++la) { mergeLists.emplace_back(1, la); } mergeSources.push_back(mergeLists); /*// for debugging for (size_t i = 0; i < mergeLists.size(); ++i) { std::cerr << nont << ": " << i << " [ "; for (auto elem : mergeLists[i]) std::cerr << elem << ", "; std::cerr << "]" << std::endl; } */ } } // clean up 
this->storageManager->free_weight_maps(this->tracesInsideWeights); this->storageManager->free_weight_maps(this->tracesOutsideWeights); for (WeightVector &weightVector : nonterminalFrequencies) { this->storageManager->free_weight_vector(weightVector); } nonterminalFrequencies.clear(); return MergeInfo(mergeSources, nontSplitsAfterMerge, mergeFactors); } void setMergeThresholdFunction(ThresholdFunction thresholdFunction) { this->thresholdFunction = thresholdFunction; } private: /** * Compute merge-Δ for each pair of latent annotation. This is an approximation of likelihood after merge * divided by likelihood before merge. * Splits with high merge-Δ should be merged, splits with low merge-Δ should be kept. */ inline void computePairwiseMergeDeltas( const std::vector<WeightVector> & expectedFrequencies , const std::vector<size_t> &nontDimensions , std::vector<std::vector<std::vector<double>>> &mergeDelta ) const { mergeDelta.clear(); for (size_t nont = 0; nont < nontDimensions.size(); ++nont){ mergeDelta.emplace_back(0); for (size_t j = 0; j < nontDimensions[nont]; ++ j) { mergeDelta.back().emplace_back(j, 0.0); } } for (typename DefaultMergePreparator<Nonterminal, TraceID>::TraceIterator trace_id = this->traceManager->cbegin() ; trace_id < this->traceManager->cend() ; ++trace_id) { const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &insideWeights = this->tracesInsideWeights[trace_id - this->traceManager->cbegin()]; const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &outsideWeights = this->tracesOutsideWeights[trace_id - this->traceManager->cbegin()]; for (const Element<Node<Nonterminal>> &node : *(trace_id->get_hypergraph())) { const size_t nont = node->get_label_id(); const size_t nontDim = nontDimensions[nont]; const auto &insideWeight = insideWeights.at(node); const auto &outsideWeight = outsideWeights.at(node); double denominator = 0.0; for (size_t i = 0; i < nontDim; ++i) { const double in = insideWeight(i); const double out = outsideWeight(i); 
denominator += in * out; } if ( denominator <= 0 or std::isinf(denominator) or std::isnan(denominator)) continue; double prefix_sum = 0; for (size_t i = 0; i < nontDim; ++i) { const double in1 = insideWeight(i); const double out1 = outsideWeight(i); double infix_sum = 0; for (size_t j = i + 1; j < nontDim; ++j) { const double in2 = insideWeight(j); const double out2 = outsideWeight(j); const double f_norm = expectedFrequencies[nont](i) + expectedFrequencies[nont](j); const double p1 = expectedFrequencies[nont](i) / f_norm; const double p2 = expectedFrequencies[nont](j) / f_norm; const double inMerged = (p1 * in1) + (p2 * in2); const double outMerged = out1 + out2; double postfix_sum = 0; for (size_t k = j + 1; k < nontDim; ++k) { postfix_sum += insideWeight(k) * outsideWeight(k); } const double others = prefix_sum + infix_sum + postfix_sum; const double Q = (others + inMerged * outMerged) / denominator; if (std::isnan(Q)) { std::cerr << "bad fraction " << Q << " where" << std::endl; std::cerr << "merged " << inMerged * outMerged << std::endl; std::cerr << "denom " << denominator << std::endl; assert(!std::isnan(Q)); } double &delta = mergeDelta[nont][j][i]; delta += std::log(Q); infix_sum += in2 * out2; } prefix_sum += in1 * out1; } } } // for (auto nont = 0; nont < nontDimensions.size(); ++nont) { // for (size_t j = 0; j < nontDimensions[nont]; ++j) // for (size_t i = 0; i < j; ++i) // std::cerr << "(" << nont << ": " << j << " vs. 
" << i << ": " << mergeDelta[nont][j][i] << ") "; // } // std::cerr << std::endl; } // not used in this class double computeMergeThreshold(const std::vector<std::vector<double>> &) { return 0.0; }; // compute merge Δ statistics std::vector<double> mergeWeightStatistics(const std::vector<std::vector<std::vector<double>>>& mergeDeltas) { double min {std::numeric_limits<double>::max()}; double max {std::numeric_limits<double>::min()}; double sum {0.0}; size_t count {0}; for (auto nont_vec : mergeDeltas) { for (auto la_1 : nont_vec){ for (auto la_1_2_delta : la_1) { if (la_1_2_delta > max) max = la_1_2_delta; if (la_1_2_delta < min) min = la_1_2_delta; sum += la_1_2_delta; count++; } } } const double mean {sum / count}; double above_mean_sum {0.0}; size_t above_mean_count {0}; double below_mean_sum {0.0}; size_t below_mean_count {0}; for (auto nont_vec : mergeDeltas) { for (auto la_1 : nont_vec){ for (auto la_1_2_delta : la_1) { if (la_1_2_delta > mean) { above_mean_sum += la_1_2_delta; above_mean_count++; } else if (la_1_2_delta < mean) { below_mean_sum += la_1_2_delta; below_mean_count++; } } } } const double third_quartile {above_mean_count > 0 ? above_mean_sum / above_mean_count : mean}; const double first_quartile {below_mean_count > 0 ? below_mean_sum / below_mean_count : mean}; std::cerr << "SCC merge Δ statistics {"; std::cerr << "min: " << min << " first quartile: " << first_quartile << " mean: " << mean << " third quartile: " << third_quartile << " max: " << max << " }" << std::endl; return {min, first_quartile, mean, third_quartile, max}; } }; } #endif //STERMPARSER_MERGEPREPARATOR_H
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class 
SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class 
ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPDeclareReductionDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. 
struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. 
Since it has no previous decls, it will remain // with internal linkage. return isVisible(Old) || New->isExternallyVisible(); } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). 
PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. 
Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. 
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. 
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. 
/// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. 
ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. 
unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. 
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. 
TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. 
GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; void ReadMethodPool(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema& S) : S(S), OldFPContractState(S.FPFeatures.fp_contract) {} ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; } private: Sema& S; bool OldFPContractState : 1; }; /// Records and restores the vtordisp state on entry/exit of C++ method body. 
class VtorDispStackRAII { public: VtorDispStackRAII(Sema &S, bool ShouldSaveAndRestore) : S(S), ShouldSaveAndRestore(ShouldSaveAndRestore), OldVtorDispStack() { if (ShouldSaveAndRestore) OldVtorDispStack = S.VtorDispModeStack; } ~VtorDispStackRAII() { if (ShouldSaveAndRestore) S.VtorDispModeStack = OldVtorDispStack; } private: Sema &S; bool ShouldSaveAndRestore; SmallVector<MSVtorDispAttr::Mode, 2> OldVtorDispStack; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. 
SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. 
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. 
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. 
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildPipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = 
false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... 
Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. 
bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. 
std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the 
given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec 
&DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void 
CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule. 
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Check if module import may be found in the current context, /// emit error if not. void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, 
SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. 
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool EnumUnderlyingIsImplicit, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. 
/// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// \brief Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, AvailabilityMergeKind AMK, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool 
MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. 
bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult 
PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. 
virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, 
TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void 
NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, 
OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). 
enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. 
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. 
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void 
ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. 
ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. 
const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. 
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. 
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in global method pool for /// given selector. If no such method or only one method found, function returns /// false; otherwise, it returns true bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. 
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    // Optionally remember the (identifier, location) pair so the same typo is
    // not re-attempted at the same place later.
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    // An empty TypoCorrection signals "no correction found".
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
    // Thin wrapper over the private LookupMethodInGlobalPool with
    // instance=true.
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
    // Thin wrapper over the private LookupMethodInGlobalPool with
    // instance=false.
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Wrapper for a fully-formed expression passed into statement-building
  /// callbacks; constructible from an Expr only by Sema::MakeFullExpr.
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    // Use the expression's own location when one is available.
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    // Same as MakeFullExpr, but the expression's value is discarded
    // (e.g. an expression statement).
    ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S): S(S) {
      S.ActOnStartOfCompoundStmt();
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;  // When false, the destructor is a no-op (see disable()).
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    // Call when the scope has been popped by other means, so the destructor
    // does not pop it a second time.
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                           SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                           SourceLocation DotDotDotLoc, Expr *RHSVal,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  StmtResult ActOnIfStmt(SourceLocation IfLoc,
                         FullExprArg CondVal, Decl *CondVar,
                         Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Expr *Cond,
                                    Decl *CondVar);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
                            FullExprArg Cond,
                            Decl *CondVar, Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc,
                         SourceLocation CondLParen, Expr *Cond,
                         SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First, FullExprArg Second,
                          Decl *SecondVar,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr 
*RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, llvm::InlineAsmIdentifierInfo &Info, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, 
TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. 
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                      SourceLocation OpLoc);

/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Begin delaying diagnostics while a declaration is parsed: pushes \p pool
/// onto the delayed-diagnostics stack and returns the state needed to pop it
/// again in PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}

/// End the delayed-diagnostic scope opened by PushParsingDeclaration;
/// \p decl is the declaration that was just parsed.
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;

/// Enter a class-parsing context: saves the current delayed-diagnostic
/// state (pushUndelayed) and returns it for PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}

/// Leave a class-parsing context, restoring the delayed-diagnostic state
/// saved by the matching PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

// NOTE(review): presumably re-queues currently delayed diagnostics into
// \p pool — confirm against the implementation in Sema.
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

/// Kinds of availability problem a referenced declaration can have.
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };

/// Emit an availability warning of kind \p AD about the use of \p D,
/// with optional message and Objective-C class/property context.
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
                             NamedDecl *D, StringRef Message,
                             SourceLocation Loc,
                             const ObjCInterfaceDecl *UnknownObjCClass,
                             const ObjCPropertyDecl *ObjCProperty,
                             bool ObjCPropertyAccess);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. 
/// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. 
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo 
*TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr 
*Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool 
ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList, UsingDirectiveDecl * &UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                      bool HadMultipleCandidates, bool IsListInitialization,
                      bool IsStdInitListInitialization,
                      bool RequiresZeroInit, unsigned ConstructKind,
                      SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, bool Elidable,
                      MultiExprArg Exprs, bool HadMultipleCandidates,
                      bool IsListInitialization,
                      bool IsStdInitListInitialization, bool RequiresZeroInit,
                      unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Reset the collected throw() exception set.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept; start from the empty dynamic spec.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);

/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. 
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. 
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. 
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = 
false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. 
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. 
/// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. 
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. 
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, 
AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// \brief Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType 
CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void 
HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, 
SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<Decl *> Params, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool 
CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. 
UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
  enum TemplateDeductionResult {
    /// \brief Template argument deduction was successful.
    TDK_Success = 0,
    /// \brief The declaration was invalid; do nothing.
    TDK_Invalid,
    /// \brief Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// \brief Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// \brief Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// \brief Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// \brief Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// \brief After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// \brief A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// \brief When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// \brief When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// \brief The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// \brief The arguments included an overloaded function name that could
    /// not be resolved to a suitable function.
    TDK_FailedOverloadResolution,
    /// \brief Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure
  };

  /// \brief Deduce the template arguments of the given class template
  /// partial specialization from \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  /// \brief Deduce the template arguments of the given variable template
  /// partial specialization from \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  /// \brief Substitute the explicitly-specified template arguments of
  /// \p FunctionTemplate, seeding \p Deduced and producing the substituted
  /// parameter types (and, if requested, function type).
  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes,
      QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx,
                    QualType OriginalArgType)
        : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) { }

    QualType OriginalParamType;
    unsigned ArgIdx;
    QualType OriginalArgType;
  };

  /// \brief Complete template argument deduction for \p FunctionTemplate,
  /// producing the deduced specialization in \p Specialization.
  TemplateDeductionResult
  FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
                      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                                  unsigned NumExplicitlySpecified,
                                  FunctionDecl *&Specialization,
                                  sema::TemplateDeductionInfo &Info,
          SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
                                  bool PartialOverloading = false);

  /// \brief Deduce template arguments for a call to \p FunctionTemplate
  /// given the call arguments \p Args.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          ArrayRef<Expr *> Args,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool PartialOverloading = false);

  /// \brief Deduce template arguments for \p FunctionTemplate when taking
  /// its address with the target function type \p ArgFunctionType.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool InOverloadResolution = false);

  /// \brief Deduce template arguments for a templated conversion function
  /// converting to \p ToType.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType,
                          CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  /// \brief Deduce template arguments for \p FunctionTemplate with no
  /// target type (only explicitly-specified arguments constrain deduction).
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool InOverloadResolution = false);

  /// \brief Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

  /// \brief Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

  /// \brief Result type of DeduceAutoType.
  enum DeduceAutoResult {
    DAR_Succeeded,
    DAR_Failed,
    DAR_FailedAlreadyDiagnosed
  };

  DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
                                  QualType &Result);
  DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
                                  QualType &Result);
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);

  /// \brief Deduce the type of a variable \p VDecl from its initializer
  /// \p Init.
  QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                        QualType Type, TypeSourceInfo *TSI,
                                        SourceRange Range, bool DirectInit,
                                        Expr *Init);

  TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

  bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                        SourceLocation ReturnLoc,
                                        Expr *&RetExpr, AutoType *AT);

  /// \brief Determine which of two function templates is more specialized
  /// per the partial-ordering rules, in the context \p TPOC.
  FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
                                                   FunctionTemplateDecl *FT2,
                                                   SourceLocation Loc,
                                           TemplatePartialOrderingContext TPOC,
                                                   unsigned NumCallArguments1,
                                                   unsigned NumCallArguments2);
  /// \brief Find the most specialized function template in the range
  /// [SBegin, SEnd), reporting failure or ambiguity with the supplied
  /// diagnostics when \p Complain is true.
  UnresolvedSetIterator
  getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                     TemplateSpecCandidateSet &FailedCandidates,
                     SourceLocation Loc,
                     const PartialDiagnostic &NoneDiag,
                     const PartialDiagnostic &AmbigDiag,
                     const PartialDiagnostic &CandidateDiag,
                     bool Complain = true,
                     QualType TargetType = QualType());
  /// \brief Return the more specialized of two class template partial
  /// specializations per the partial-ordering rules.
  ClassTemplatePartialSpecializationDecl *
  getMoreSpecializedPartialSpecialization(
      ClassTemplatePartialSpecializationDecl *PS1,
      ClassTemplatePartialSpecializationDecl *PS2,
      SourceLocation Loc);

  /// \brief Return the more specialized of two variable template partial
  /// specializations per the partial-ordering rules.
  VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
      VarTemplatePartialSpecializationDecl *PS1,
      VarTemplatePartialSpecializationDecl *PS2,
      SourceLocation Loc);

  void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                  bool OnlyDeduced,
                                  unsigned Depth,
                                  llvm::SmallBitVector &Used);
  void MarkDeducedTemplateParameters(
      const FunctionTemplateDecl *FunctionTemplate,
      llvm::SmallBitVector &Deduced) {
    return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
  }
  static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                  const FunctionTemplateDecl *FunctionTemplate,
                                            llvm::SmallBitVector &Deduced);

  //===--------------------------------------------------------------------===//
  // C++ Template Instantiation
  //

  MultiLevelTemplateArgumentList getTemplateInstantiationArgs(
      NamedDecl *D, const TemplateArgumentList *Innermost = nullptr,
      bool RelativeToPrimary = false,
      const FunctionDecl *Pattern = nullptr);

  /// \brief A template instantiation that is currently in progress.
  struct ActiveTemplateInstantiation {
    /// \brief The kind of template instantiation we are performing.
    enum InstantiationKind {
      /// We are instantiating a template declaration. The entity is
      /// the declaration we're instantiating (e.g., a CXXRecordDecl).
      TemplateInstantiation,

      /// We are instantiating a default argument for a template
      /// parameter. The Entity is the template, and
      /// TemplateArgs/NumTemplateArgs provides the template
      /// arguments as specified.
      /// FIXME: Use a TemplateArgumentList
      DefaultTemplateArgumentInstantiation,

      /// We are instantiating a default argument for a function.
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a ClassTemplatePartialSpecializationDecl or
      /// a FunctionTemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation
    } Kind;

    /// \brief The point of instantiation within the source code.
    SourceLocation PointOfInstantiation;

    /// \brief The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// \brief The entity that is being instantiated.
    Decl *Entity;

    /// \brief The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    /// \brief The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// \brief The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// \brief The source range that covers the construct that caused
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    ActiveTemplateInstantiation()
      : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
        TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

    /// \brief Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;

    friend bool operator==(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      if (X.Kind != Y.Kind)
        return false;

      if (X.Entity != Y.Entity)
        return false;

      // Entries whose Kind carries extra state must also agree on that
      // state to compare equal.
      switch (X.Kind) {
      case TemplateInstantiation:
      case ExceptionSpecInstantiation:
        return true;

      case PriorTemplateArgumentSubstitution:
      case DefaultTemplateArgumentChecking:
        return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;

      case DefaultTemplateArgumentInstantiation:
      case ExplicitTemplateArgumentSubstitution:
      case DeducedTemplateArgumentSubstitution:
      case DefaultFunctionArgumentInstantiation:
        return X.TemplateArgs == Y.TemplateArgs;
      }

      llvm_unreachable("Invalid InstantiationKind!");
    }

    friend bool operator!=(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      return !(X == Y);
    }
  };

  /// \brief List of active template instantiations.
  ///
  /// This vector is treated as a stack. As one template instantiation
  /// requires another template instantiation, additional
  /// instantiations are pushed onto the stack up to a
  /// user-configurable limit LangOptions::InstantiationDepth.
  SmallVector<ActiveTemplateInstantiation, 16>
    ActiveTemplateInstantiations;

  /// \brief Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;

  /// \brief Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. 
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. 
SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. 
std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. 
/// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const 
MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const 
MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation 
SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, 
ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. 
/// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. 
The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); enum PragmaPackKind { PPK_Default, // #pragma pack([n]) PPK_Show, // #pragma pack(show), only supported by MSVC. PPK_Push, // #pragma pack(push, [identifier], [n]) PPK_Pop // #pragma pack(pop, [identifier], [n]) }; /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// \brief Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// \brief Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// \brief Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT void ActOnPragmaFPContract(tok::OnOffSwitch OOS); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. 
void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// \brief Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// \brief Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// \brief Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. 
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); void AddNSConsumedAttr(SourceRange AttrRange, Decl *D, unsigned SpellingListIndex, bool isNSConsumed, bool isTemplateInstantiation); //===--------------------------------------------------------------------===// // C++ Coroutines TS // ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E); ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. 
// private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); public: /// \brief Return true if the provided declaration \a VD should be captured by /// reference in the provided scope \a RSI. This will take into account the /// semantics of the directive and associated clauses. bool IsOpenMPCapturedByRef(ValueDecl *D, const sema::CapturedRegionScopeInfo *RSI); /// \brief Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *IsOpenMPCapturedDecl(ValueDecl *D); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// \brief Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level); /// \brief Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level); ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// \brief End analysis of clauses. void EndOpenMPClause(); /// \brief Called on end of data sharing attribute block. 
void EndOpenMPDSABlock(Stmt *CurDirective); /// \brief Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// \brief Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// \brief Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// \brief Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// \brief Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// \brief Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer); /// \brief Called at the end of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// \brief End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                        ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                    Stmt *AStmt, SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                               SourceLocation StartLoc,
                                               SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                              Stmt *AStmt,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                     Stmt *AStmt, SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult ActOnOpenMPCancellationPointDirective(
    SourceLocation StartLoc, SourceLocation EndLoc,
    OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);

OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                       Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// \brief Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);

OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                   unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                             SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// \brief Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. 
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'priority' clause. 
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). 
ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. 
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind,
                                             bool ConvertRHS = true);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions, if ConvertRHS
// is true.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false, bool ConvertRHS = true);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool *NonStandardCompositeType = nullptr);
/// Convenience overload that accepts ExprResult operands: unwraps the
/// expressions, forwards to the Expr*& overload above, and writes the
/// (possibly rewritten) operands back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = nullptr) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                NonStandardCompositeType);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// \brief Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds. 
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. 
void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode. Only in effect if
                  // LangOpts.CUDADisableTargetCallChecks is true.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
///               nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
                              SmallVectorImpl<FunctionDecl *> &Matches);
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
                              SmallVectorImpl<DeclAccessPair> &Matches);
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void 
CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void 
GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartImpl(CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinMSVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, 
ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. 
IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. 
/// \brief Check whether more arguments are being passed than a candidate
/// function declares parameters for.
/// \param NumParams number of parameters the candidate declares.
/// \param NumArgs number of arguments at the call site.
/// \param PartialOverloading true while code completion is in progress,
///        i.e. the argument list may still be growing.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
};

/// \brief RAII object that enters a new expression evaluation context.
///
/// Pushes the context in the constructor and pops it in the destructor,
/// so push/pop pairs can never become unbalanced on any exit path.
class EnterExpressionEvaluationContext {
  Sema &Actions;  // the Sema whose evaluation-context stack is manipulated

 public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  // Overload selected by the ReuseLambdaContextDecl_t tag type: re-uses the
  // lambda context declaration of the enclosing evaluation context.
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }
  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

/// Build a DeductionFailureInfo record describing why template argument
/// deduction failed, for use in overload-resolution diagnostics.
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context,
                         Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  // Token stream of the not-yet-parsed template body.
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
ps_local-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * Copyright (c) 2014 by Contributors
 * \file ps_local-inl.h
 * \brief local multi-threading implementation of PS abstraction
 *
 * \author Tianqi Chen, Mu Li
 */
#ifndef MSHADOW_PS_LOCAL_INL_H_ // NOLINT(*)
#define MSHADOW_PS_LOCAL_INL_H_ // NOLINT(*)
#include <map>
#include <utility>
#include <string>
#include <vector>
#if defined(_OPENMP)
#include <omp.h>
// OpenMP loop indices must be signed on MSVC but may be unsigned elsewhere.
#ifdef _MSC_VER
typedef int ms_omp_uint;
#else
typedef unsigned ms_omp_uint;
#endif
#endif
#include "./thread.h"
#include "./thread_util.h"
namespace mshadow {
namespace ps {
// multi-threaded local (single-process) implementation of the shared-model
// parameter-server abstraction
template<typename xpu, typename DType>
class LocalModel : public ISharedModel<xpu, DType> {
 public:
  // redefine callback function
  typedef typename ISharedModel<xpu, DType>::CallbackFunction CallbackFunction;
  // constructor: establishes default configuration; real resource
  // allocation is deferred to Init()
  LocalModel(void) {
    init_end = 0;                    // not yet initialized
    perdev_pull_thread = 1;          // one pull thread per device by default
    perdev_push_thread = 1;          // one push thread per device by default
    use_fifo_push_queue = 0;
    bigarray_bound = 1000 * 1000;    // threshold for OpenMP reduction path
    nthread_reduction = 8;
    use_pin_memory = 1;
    test_on_server = 0;
    update_on_server = 0;
    destroy_signal = false;
    custom_server = NULL;
  }
  // destructor
  virtual ~LocalModel(void) {
    this->Destroy();
  }
  // tear down all queues, threads, maps and synchronization primitives;
  // safe to call more than once (guarded by init_end)
  inline void Destroy(void) {
    if (init_end != 0) {
      // tell worker threads to exit, then unblock them via queue aborts
      destroy_signal = true;
      for (size_t i = 0;
i < push_queues.size(); ++i) { push_queues[i].Abort(1); } for (size_t i = 0; i < pull_queues.size(); ++i) { pull_queues[i].Abort(1); } for (size_t i = 0; i < thread_push_handler.size(); ++i) { thread_push_handler[i].Join(); } for (size_t i = 0; i < thread_pull_handler.size(); ++i) { thread_pull_handler[i].Join(); } for (size_t i = 0; i < push_queues.size(); ++i) { push_queues[i].Destroy(); } push_map.Destroy(); push_lock.Destroy(); for (size_t i = 0; i < pull_queues.size(); ++i) { pull_queues[i].Destroy(); } pull_map.Destroy(); request_lock.Destroy(); wait_lock.Destroy(); wait_cond.Destroy(); init_end = 0; } if (custom_server != NULL) { delete custom_server; custom_server = NULL; } } virtual void SetParam(const char *name, const char *val) { int key; if (sscanf(name, "push_op[%d]", &key) == 1) { if (!strcmp(val, "gather")) { request_lock.Lock(); push_operation[key] = kGather; request_lock.Unlock(); return; } if (!strcmp(val, "sum")) { push_operation[key] = kSum; return; } LOG(FATAL) << "unknown push operation " << val; } if (!strcmp(name, "reduce_thread")) { nthread_reduction = atoi(val); } if (!strcmp(name, "use_pin_memory")) { use_pin_memory = atoi(val); } if (!strcmp(name, "bigarray_bound")) { bigarray_bound = static_cast<size_t>(atol(val)); } if (!strcmp(name, "pull_thread")) { if (!strcmp(val, "ndev")) { perdev_pull_thread = 1; } else if (!strcmp(val, "one")) { perdev_pull_thread = 0; } else { LOG(FATAL) << "invalid value for parameter pull_thread," << " can only be ndev or one"; } } if (!strcmp(name, "push_thread")) { if (!strcmp(val, "ndev")) { perdev_push_thread = 1; } else if (!strcmp(val, "one")) { perdev_push_thread = 0; } else { LOG(FATAL) << "invalid value for parameter push_thread," << " can only be ndev or one"; } } if (!strcmp(name, "update_on_server")) { update_on_server = atoi(val); } if (!strcmp(name, "test_on_server")) { test_on_server = atoi(val); } // ignore message parameter if (!strncmp(name, "msg:", 4)) return; 
cfgvec.push_back(std::make_pair(std::string(name), std::string(val))); } virtual void PullWait(int key, int devid) { const int wid = GetWorkIndex(devid); PullEntry *p = pull_map.Get(key); if (p == NULL || p->wait.size() == 0) return; PullEntry &e = *p; // wake up waiters if any CHECK_EQ(e.wait.size(), devices.size()) << "PullWait: must initialize the wait"; PullWaitRecord &w = e.wait[wid]; if (!w.finished) { wait_lock.Lock(); w.nwait += 1; while (!w.finished) { wait_cond.Wait(&wait_lock); } w.nwait -= 1; CHECK_GE(w.nwait, 0) << "boundary check"; wait_lock.Unlock(); } } virtual void Init(const std::vector<int> &devices) { CHECK_EQ(init_end, 0) << "LocalServer.Init can only call Init once"; CHECK_NE(devices.size(), 0) << "LocalServer.Init: must at least contain 1 devices"; this->devices = devices; destroy_signal = false; // initialize device id to local index dev2index.clear(); for (size_t i = 0; i < devices.size(); ++i) { int devid = devices[i]; CHECK_GE(devid, 0) << "device id must be bigger than 0"; if (devid >= static_cast<int>(dev2index.size())) { dev2index.resize(devid + 1, -1); } dev2index[devid] = static_cast<int>(i); } // allocate space pull_stream.resize(devices.size()); push_stream.resize(devices.size()); // initialize all the thread related things if (perdev_push_thread != 0) { push_queues.resize(devices.size()); } else { push_queues.resize(1); } for (size_t i = 0; i < push_queues.size(); ++i) { push_queues[i].Init(use_fifo_push_queue != 0); } push_map.Init(); push_lock.Init(); pull_map.Init(); request_lock.Init(); wait_lock.Init(); wait_cond.Init(); if (perdev_pull_thread != 0) { pull_queues.resize(devices.size()); } else { pull_queues.resize(1); } for (size_t i = 0; i < pull_queues.size(); ++i) { pull_queues[i].Init(); } // initialize the thread if (perdev_push_thread != 0) { thread_push_handler.resize(devices.size()); for (size_t i = 0; i < devices.size(); ++i) { std::pair<LocalModel*, size_t> *p = new std::pair<LocalModel*, size_t>(); *p = 
std::make_pair(this, i);
        thread_push_handler[i].Start(PushLocalThread, p);
      }
    } else {
      // single shared push thread servicing all devices
      thread_push_handler.resize(1);
      thread_push_handler[0].Start(PushGlobalThread, this);
    }
    // initialize pull handler
    if (perdev_pull_thread != 0) {
      thread_pull_handler.resize(devices.size());
      for (size_t i = 0; i < devices.size(); ++i) {
        // heap-allocated (this, index) pair handed to the thread entry;
        // NOTE(review): deletion happens in the thread functions outside
        // this view -- confirm they free the pair.
        std::pair<LocalModel*, size_t> *p
            = new std::pair<LocalModel*, size_t>();
        *p = std::make_pair(this, i);
        thread_pull_handler[i].Start(PullLocalThread, p);
      }
    } else {
      thread_pull_handler.resize(1);
      thread_pull_handler[0].Start(PullGlobalThread, this);
    }
    this->InitCustomerServer();
    this->init_end = 1;  // mark initialization complete
  }
  // set weight: overwrite the server-side copy of key's weight with the
  // given device tensor; serialized with other buffer users by push_lock
  virtual void SetWeight_(Tensor<xpu, 2, DType> data,
                          int key, int devid) {
    PushEntry &e = push_map.GetRef(key);
    Stream<xpu> s;  // default stream used for the device->host copy
    push_lock.Lock();
    mshadow::Copy(e.weight, data, &s);
    push_lock.Unlock();
  }
  // debug helper: compare the device tensor against the server-side weight
  // and print the relative absolute error (only valid with test_on_server)
  virtual void CheckWeight_(Tensor<xpu, 2, DType> data,
                            int key, int devid) {
    CHECK_NE(test_on_server, 0) << "must be in pair debug mode";
    PushEntry &e = push_map.GetRef(key);
    mshadow::TensorContainer<cpu, 2, DType> tmp(false);
    tmp.Resize(data.shape_);
    Stream<xpu> s;
    push_lock.Lock();
    // copy data
    mshadow::Copy(tmp, data, &s);
    index_t count = tmp.shape_.Size();
    double diff = 0.0, ssum = 0.0, maxdiff = 0.0;
    index_t mxidx = 0;
    for (index_t i = 0; i < count; ++i) {
      double d = std::abs(tmp.dptr_[i] - e.weight.dptr_[i]);
      if (d > maxdiff) {
        maxdiff = d; mxidx = i;
      }
      diff += d;
      ssum += std::abs(tmp.dptr_[i]);
    }
    push_lock.Unlock();
    // relative absolute error; "diff != diff" detects NaN
    double rerr = diff / ssum;
    if (rerr > 1e-5 || diff != diff) {
      fprintf(stderr,
              "PSLocal:key=%d,dev=%d: err=%f, maxd[%u]=%f, diff=%f, ssum=%f\n",
              key, devid, rerr, mxidx, maxdiff, diff, ssum);
    } else {
      fprintf(stderr, "PSLocal:key=%d,dev=%d:check pass\n", key, devid);
    }
  }

 protected:
  /*! \brief operation performed locally in PS */
  enum LocalOp {
    /*! \brief take sum of all devices over the same key */
    kSum = 0,
    /*!
* \brief concatenate(gather),
     *  the tensors in all devices with same key
     */
    kGather = 1
  };
  // set up the pull and push bookkeeping structures for a new key
  virtual void InitKey_(Shape<2> shape, int key, int devid) {
    this->InitPullMap(key);
    this->InitPushMap(key, shape);
  }
  // enqueue a push task for (key, devid); the pull state for this device is
  // marked not-ready so a subsequent pull waits for the push to finish
  virtual void Push_(Tensor<xpu, 2, DType> data,
                     int key, int devid, int priority) {
    PullEntry &e = pull_map.GetRef(key);
    e.req[GetWorkIndex(devid)].ready = false;
    if (perdev_push_thread != 0) {
      int wid = GetWorkIndex(devid);
      push_queues[wid].Push(PullTask(data, key, devid), priority);
    } else {
      push_queues[0].Push(PullTask(data, key, devid), priority);
    }
  }
  // record a pull request for (key, devid); dispatched immediately if the
  // data is already ready, otherwise marked pending until PullReady fires
  virtual void PullReq_(Tensor<xpu, 2, DType> data,
                        int key, int devid, int priority,
                        CallbackFunction callback,
                        void *callback_arg) {
    PullEntry &e = pull_map.GetRef(key);
    CHECK_EQ(e.req.size(), devices.size())
        << "PullReq: must initialize the key, req";
    CHECK_EQ(e.wait.size(), devices.size())
        << "PullReq: must initialize the key, wait";
    const int wid = GetWorkIndex(devid);
    PullReqRecord &r = e.req[wid];
    r.dest = data;
    r.priority = priority;
    r.callback = callback;
    r.callback_arg = callback_arg;
    // reset pull request finish mark
    wait_lock.Lock();
    e.wait[wid].finished = false;
    wait_lock.Unlock();
    // check ready event
    request_lock.Lock();
    CHECK_EQ(!r.pending, true)
        << "key = " << key << "cannot send duplicate pull request before it finishes";
    if (e.req[wid].ready) {
      if (perdev_pull_thread != 0) {
        pull_queues[wid].Push(std::make_pair(key, devid));
      } else {
        pull_queues[0].Push(std::make_pair(key, devid));
      }
    } else {
      r.pending = true;
    }
    request_lock.Unlock();
  }
  /*!
* \brief called to notify that the data is ready for pull
   * \param data the data that can be pulled back
   * \param key the key of the data
   */
  virtual void PullReady(Tensor<cpu, 2> data, int key) {
    PullEntry &e = pull_map.GetRef(key);
    CHECK_EQ(e.req.size(), devices.size())
        << "PullReady: must initialize the key, req";
    request_lock.Lock();
    e.src = data;
    // mark every device ready and dispatch any pull that was pending
    for (index_t i = 0; i < e.req.size(); ++i) {
      e.req[i].ready = true;
      if (e.req[i].pending) {
        if (perdev_pull_thread != 0) {
          pull_queues[i].Push(std::make_pair(key, devices[i]));
        } else {
          pull_queues[0].Push(std::make_pair(key, devices[i]));
        }
        e.req[i].pending = false;
      }
    }
    request_lock.Unlock();
  }
  // notify the optional custom server that a key's weight has been created
  virtual void ServerInitKey(Tensor<cpu, 2> weight, int key) {
    if (custom_server != NULL) {
      // initialize server, and ready for pullback
      custom_server->InitModel(key, weight.dptr_, weight.MSize());
      if (update_on_server != 0) {
        this->PullReady(weight, key);
      }
    }
  }
  /*!
   * \brief event handler for push finish
   *  called when all the data with same key comes in
   * \param data the buffer holds the data in all devices
   * \param key the key of the data
   */
  virtual void HandlePushFinish(Tensor<cpu, 3, DType> data, int key) {
    // LOG(ERROR) << dbstr(data);
    // default operation is sum unless overridden via push_op[key]
    LocalOp op = kSum;
    typename std::map<int, LocalOp>::const_iterator it
        = push_operation.find(key);
    // NOTE(review): "it->first == key" is redundant after find()
    if (it != push_operation.end() && it->first == key) {
      op = it->second;
    }
    // customized server
    if (custom_server != NULL) {
      this->ReduceSum(data);
      custom_server->Update(key, data[0].dptr_, data[0].MSize());
      if (update_on_server != 0) {
        PushEntry &e = push_map.GetRef(key);
        this->PullReady(e.weight, key);
      } else {
        CHECK_NE(test_on_server, 0) << "test mode";
        this->PullReady(data[0], key);
      }
      return;
    }
    switch (op) {
      case kSum: {
        this->ReduceSum(data);
        this->PullReady(data[0], key);
        return;
      }
      case kGather: {
        this->PullReady(data.FlatTo2D(), key);
        return;
      }
      default: LOG(FATAL) << "unknown LocalOp";
    }
  }
  /*!
* \brief event handler for reduce finish
   *  called when all the data with same key finishes the reduction
   * \param data the buffer holds the reduction result
   * \param key the key of the data
   */
  inline void HandleReduceFinish(Tensor<cpu, 2, DType> data, int key) {
    if (custom_server != NULL) {
      custom_server->Update(key, data.dptr_, data.MSize());
      if (update_on_server != 0) {
        PushEntry &e = push_map.GetRef(key);
        this->PullReady(e.weight, key);
      } else {
        CHECK_NE(test_on_server, 0) << "test mode";
        this->PullReady(data, key);
      }
    } else {
      // no custom server: the reduced result itself is what clients pull
      this->PullReady(data, key);
    }
  }
  // create and configure the optional server-side model updater,
  // replaying every configuration recorded in cfgvec
  virtual void InitCustomerServer(void) {
    if (update_on_server != 0 || test_on_server != 0) {
      custom_server = CreateModelUpdater<DType>();
      for (size_t j = 0; j < cfgvec.size(); ++j) {
        custom_server->SetParam(cfgvec[j].first.c_str(),
                                cfgvec[j].second.c_str());
      }
      custom_server->InitUpdater(0, 0, NULL);
    }
  }

 protected:
  // customized server
  IModelUpdater<DType> *custom_server;
  // whether use fifo push queue
  int use_fifo_push_queue;
  // perform sum reduction: accumulate data[1..n-1] into data[0]
  inline void ReduceSum(Tensor<cpu, 3, DType> data) {
#if defined(_OPENMP)
    // large tensors: parallelize the row-wise accumulation with OpenMP
    if (data[0].MSize() >= bigarray_bound &&
        nthread_reduction != 0) {
      ms_omp_uint ntask = static_cast<ms_omp_uint>(data.size(1));
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction)
      for (ms_omp_uint j = 0; j < ntask; ++j) {
        for (index_t i = 1; i < data.size(0); ++i) {
          data[0][j] += data[i][j];
        }
      }
    } else  //NOLINT(*)
#endif
    {
      // small tensors (or no OpenMP): sequential whole-slice accumulation
      for (index_t i = 1; i < data.size(0); ++i) {
        data[0] += data[i];
      }
    }
  }

 private:
  /*! \brief task running */
  struct PullTask {
    /*! \brief the task data source */
    Tensor<xpu, 2, DType> data;
    /*! \brief the key to the tensor */
    int key;
    /*!
     * \brief the device id, (key,devid),
     *  uniquely identifies a mem location
     */
    int devid;
    PullTask(void) {}
    PullTask(Tensor<xpu, 2, DType> data, int key, int devid)
        : data(data), key(key), devid(devid) {}
  };
  /*!
\brief data structure to hold temporal push result */
  struct PushEntry {
    // temporal space to hold input data
    Tensor<cpu, 4, DType> data;
    // temporal space to hold weight, if needed
    Tensor<cpu, 2, DType> weight;
    // indicator whether the certain devices is already copied in
    std::vector<bool> copied;
    // number of data copied in
    int num_copied;
    // version number of data used to hold incomming data in push
    // (double-buffering: 2 versions, see Init below)
    int copyin_version;
    // use pinned memory
    bool pin_memory;
    // constructor
    PushEntry(void) : copyin_version(0) {
      weight.dptr_ = NULL;
    }
    ~PushEntry(void) {
      // free with the matching allocator (pinned host vs plain space)
      if (data.dptr_ != NULL) {
        if (pin_memory) {
          mshadow::FreeHost<xpu>(&data);
          if (weight.dptr_ != NULL) {
            mshadow::FreeHost<xpu>(&weight);
          }
        } else {
          mshadow::FreeSpace(&data);
          if (weight.dptr_ != NULL) {
            mshadow::FreeSpace(&weight);
          }
        }
      }
    }
    // allocate the double-buffered per-device staging area (and optionally
    // the weight buffer) for ndevice devices of the given shape
    inline void Init(int ndevice, Shape<2> shape,
                     bool pin_memory, bool need_weight) {
      this->pin_memory = pin_memory;
      data.shape_ = Shape4(2, ndevice, shape[0], shape[1]);
      weight.shape_ = shape;
      if (pin_memory) {
        mshadow::AllocHost<xpu>(&data);
        if (need_weight) mshadow::AllocHost<xpu>(&weight);
      } else {
        mshadow::AllocSpace(&data, false);
        if (need_weight) mshadow::AllocSpace(&weight);
      }
      CHECK_EQ(data.CheckContiguous(), true) << "Data must be contiguous";
      CHECK(!need_weight || weight.CheckContiguous()) << "Weight must be contiguous";
      num_copied = 0;
      copied.resize(ndevice, false);
    }
  };
  // a record to remember things related to pull request
  struct PullReqRecord {
    // whether this record contains a pending request
    // whether pull is ready to go
    bool ready;
    // waiting for pull ready
    bool pending;
    // the destination to pull data into
    Tensor<xpu, 2, DType> dest;
    // the priority of the
    int priority;
    // callback function
    CallbackFunction *callback;
    // argument for callback
    void *callback_arg;
    PullReqRecord(void) : ready(false), pending(false) {
    }
  };
  // a record to help handle pullwait
  struct PullWaitRecord {
    // number of thread that waits for the request to finish
    int nwait;
    // the request was finished
    bool finished;
    PullWaitRecord(void)
        : nwait(0), finished(true) {
      // set finished to true so pull without pull request returns
    }
  };
  /*! \brief data structure to hold pull request */
  struct PullEntry {
    // data to be pulled back
    Tensor<cpu, 2, DType> src;
    // pullrequest record
    std::vector<PullReqRecord> req;
    // whether there is thread waiting on this event
    std::vector<PullWaitRecord> wait;
    PullEntry(void) {
    }
  };
  // signal to notify all the thread about class destruction
  bool destroy_signal;
  // vector of devices
  std::vector<int> devices;
  // device index to local index
  std::vector<int> dev2index;
  //----- data structure used to support push ----
  // stream used by push thread each device for memcpy
  std::vector<Stream<xpu>*> push_stream;
  // the queue used for push task
  std::vector<utils::ThreadPQueue<PullTask> > push_queues;
  // thread to handle push task
  std::vector<utils::Thread> thread_push_handler;
  // lock to lock push field
  utils::Mutex push_lock;
  // the map of push buffer
  utils::ThreadSafeMap<PushEntry> push_map;
  // customized local reduction operation
  std::map<int, LocalOp> push_operation;
  //----- data structure used to support pull ----
  // the queue used for pull task
  std::vector<utils::ThreadPQueue<std::pair<int, int> > > pull_queues;
  // stream used by pull thread each device for memcpy
  std::vector<Stream<xpu>*> pull_stream;
  // the map to store pull status
  utils::ThreadSafeMap<PullEntry> pull_map;
  // thread to handle pull task
  std::vector<utils::Thread> thread_pull_handler;
  // lock to lock request field
  utils::Mutex request_lock;
  // lock to lock wait field
  utils::Mutex wait_lock;
  // conditional variable to do waiting
  utils::ConditionVariable wait_cond;
  // ---------configurations of server-------
  int init_end;
  // whether perform update on serverside
  int update_on_server;
  // debug option
  int test_on_server;
  // use pinned memory
  int use_pin_memory;
  // number of reduction thread
  int nthread_reduction;
  // the threshold for big array
  size_t bigarray_bound;
  // whether use pull thread per device
  int perdev_pull_thread;
  // whether use push thread per device
  int perdev_push_thread;
  /*! \brief history of configurations */
  std::vector< std::pair<std::string, std::string> > cfgvec;
  // push handler: worker loop that drains push tasks from the given queue
  // until destroy_signal is raised (queue Abort wakes it for shutdown)
  inline void PushProc(utils::ThreadPQueue<PullTask> *queue) {
    while (!destroy_signal) {
      PullTask tsk;
      if (queue->Pop(&tsk)) {
        const int wid = GetWorkIndex(tsk.devid);
        PushEntry &e = push_map.GetRef(tsk.key);
        CHECK_EQ(e.data[0][0].shape_, tsk.data.shape_)
            << "Tensor with same key must share same shape "
            << e.data[0][0].shape_ << " vs " << tsk.data.shape_;
        CHECK_EQ(!e.copied[wid], true) << "data inconsistency";
        // start copy
        SetDevice<xpu>(tsk.devid);
        Copy(e.data[e.copyin_version][wid], tsk.data, push_stream[wid]);
        // wait till the copy finishes
        push_stream[wid]->Wait();
        // mark copied
        e.copied[wid] = true;
        push_lock.Lock();
        e.num_copied += 1;
        int cp_version = e.copyin_version;
        bool push_finish = e.num_copied >= static_cast<int>(devices.size());
        if (push_finish) {
          // switch version: flip the double buffer for the next round
          e.copyin_version = (e.copyin_version + 1) % e.data.size(0);
          std::fill(e.copied.begin(), e.copied.end(), false);
          e.num_copied = 0;
        }
        push_lock.Unlock();
        if (push_finish) {
          this->HandlePushFinish(e.data[cp_version], tsk.key);
        }
      } else {
        // Pop only fails after Abort during shutdown
        CHECK_EQ(destroy_signal, true) << "abort but not destroy";
      }
    }
  }
  // entry for the single shared push thread: sets up one stream per device,
  // serves the global queue, then releases the streams
  inline void PushHandlerGlobal(void) {
    // allocate stream resources
    for (size_t i = 0; i < devices.size(); ++i) {
      SetDevice<xpu>(devices[i]);
      push_stream[i] = NewStream<xpu>(devices[i]);
    }
    this->PushProc(&push_queues[0]);
    // free resources
    for (size_t i = 0; i < devices.size(); ++i) {
      SetDevice<xpu>(devices[i]);
      DeleteStream(push_stream[i]);
    }
  }
  // entry for a per-device push thread (tid indexes into devices)
  inline void PushHandlerLocal(size_t tid) {
    CHECK_LT(tid, devices.size()) << "threadid exceed boundary";
    CHECK_EQ(push_queues.size(), devices.size())
        << "must have one pull_queue per device";
    // allocate stream resources
    SetDevice<xpu>(devices[tid]);
    push_stream[tid] = NewStream<xpu>(devices[tid]);
this->PushProc(&push_queues[tid]); SetDevice<xpu>(devices[tid]); DeleteStream(push_stream[tid]); } /*!\brief entry point of loader thread */ inline static MSHADOW_THREAD_PREFIX PushGlobalThread(void *pthread) { static_cast<LocalModel*>(pthread)->PushHandlerGlobal(); utils::ThreadExit(NULL); return NULL; } inline static MSHADOW_THREAD_PREFIX PushLocalThread(void *arg) { std::pair<LocalModel*, size_t> *p = static_cast<std::pair<LocalModel*, size_t>*>(arg); p->first->PushHandlerLocal(p->second); delete p; return NULL; } // push handler procedure inline void PullProc(utils::ThreadPQueue<std::pair<int, int> > *queue) { while (!destroy_signal) { std::pair<int, int> tsk; if (queue->Pop(&tsk)) { const int key = tsk.first; const int devid = tsk.second; const int wid = GetWorkIndex(devid); PullEntry &e = pull_map.GetRef(key); { // handle request CHECK_EQ(e.req.size(), devices.size()) << "PullHandler: must initialize the key, req"; PullReqRecord &r = e.req[wid]; SetDevice<xpu>(devid); Copy(r.dest, e.src, pull_stream[wid]); // callback, if any if (r.callback != NULL) { (*r.callback)(pull_stream[wid], r.callback_arg); } // wait till the operation finishes pull_stream[wid]->Wait(); } { // wake up waiters if any CHECK_EQ(e.wait.size(), devices.size()) << "PullHandler, must initialize the key, req"; PullWaitRecord &w = e.wait[wid]; wait_lock.Lock(); w.finished = true; if (w.nwait != 0) { wait_cond.Broadcast(); } wait_lock.Unlock(); } } else { CHECK_EQ(destroy_signal, true) << "abort but not destroy"; } } } // use one thread for all pull actions inline void PullHandlerGlobal(void) { // allocate stream resources for (size_t i = 0; i < devices.size(); ++i) { SetDevice<xpu>(devices[i]); pull_stream[i] = NewStream<xpu>(devices[i]); } this->PullProc(&pull_queues[0]); // free resources for (size_t i = 0; i < devices.size(); ++i) { SetDevice<xpu>(devices[i]); DeleteStream(pull_stream[i]); } } inline void PullHandlerLocal(size_t tid) { CHECK_LT(tid, devices.size()) << "threadid exceed 
boundary"; CHECK_EQ(pull_queues.size(), devices.size()) << "must have one pull_queue per device"; // allocate stream resources SetDevice<xpu>(devices[tid]); pull_stream[tid] = NewStream<xpu>(devices[tid]); this->PullProc(&pull_queues[tid]); SetDevice<xpu>(devices[tid]); DeleteStream(pull_stream[tid]); } /*!\brief entry point of pull thread, one thread for all devices */ inline static MSHADOW_THREAD_PREFIX PullGlobalThread(void *arg) { static_cast<LocalModel*>(arg)->PullHandlerGlobal(); return NULL; } inline static MSHADOW_THREAD_PREFIX PullLocalThread(void *arg) { std::pair<LocalModel*, size_t> *p = static_cast<std::pair<LocalModel*, size_t>*>(arg); p->first->PullHandlerLocal(p->second); delete p; return NULL; } // get internal index of device inline int GetWorkIndex(int devid) const { CHECK(devid >= 0 && devid < static_cast<int>(dev2index.size()) && dev2index[devid] >= 0) << "Push: invalid devid"; return dev2index[devid]; } // functions to handle pull inline void InitPullMap(int key) { pull_map.Init(key); PullEntry &e = pull_map.GetRef(key); request_lock.Lock(); // must recheck after lock if (e.req.size() == 0) { e.req.resize(devices.size(), PullReqRecord()); } request_lock.Unlock(); // check wait map wait_lock.Lock(); // must recheck after lock if (e.wait.size() == 0) { e.wait.resize(devices.size(), PullWaitRecord()); } wait_lock.Unlock(); } // functions to handle pull inline void InitPushMap(int key, Shape<2> shape) { push_map.Init(key); PushEntry &e = push_map.GetRef(key); push_lock.Lock(); if (e.copied.size() == 0) { e.Init(devices.size(), shape, use_pin_memory != 0, update_on_server != 0 || test_on_server != 0); } this->ServerInitKey(e.weight, key); push_lock.Unlock(); } }; } // namespace ps } // namespace mshadow #endif // MSHADOW_PS_LOCAL_INL_H_ NOLINT(*)
soxr.c
/* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net * Licence for this file: LGPL v2.1 See LICENCE for details. */ #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "soxr.h" #include "data-io.h" #include "internal.h" #if AVUTIL_FOUND #include <libavutil/cpu.h> #endif #if WITH_DEV_TRACE #include <stdarg.h> #include <stdio.h> int _soxr_trace_level; void _soxr_trace(char const * fmt, ...) { va_list args; va_start(args, fmt); vfprintf(stderr, fmt, args); fputc('\n', stderr); va_end(args); } #endif char const * soxr_version(void) { return "libsoxr-" SOXR_THIS_VERSION_STR; } typedef void sample_t; /* float or double */ typedef void (* fn_t)(void); typedef fn_t control_block_t[10]; #define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0]) #define resampler_process (*(void (*)(void *, size_t))p->control_block[1]) #define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2]) #define resampler_flush (*(void (*)(void *))p->control_block[3]) #define resampler_close (*(void (*)(void *))p->control_block[4]) #define resampler_delay (*(double (*)(void *))p->control_block[5]) #define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6]) #define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7]) #define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8]) #define resampler_id (*(char const * (*)(void))p->control_block[9]) typedef void * resampler_t; /* For one channel. */ typedef void * resampler_shared_t; /* Between channels. 
*/ typedef void (* deinterleave_t)(sample_t * * dest, soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch); typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest, sample_t const * const * src, size_t, unsigned, unsigned long *); struct soxr { unsigned num_channels; double io_ratio; soxr_error_t error; soxr_quality_spec_t q_spec; soxr_io_spec_t io_spec; soxr_runtime_spec_t runtime_spec; void * input_fn_state; soxr_input_fn_t input_fn; size_t max_ilen; resampler_shared_t shared; resampler_t * resamplers; control_block_t control_block; deinterleave_t deinterleave; interleave_t interleave; void * * channel_ptrs; size_t clips; unsigned long seed; int flushing; }; #if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S #include "filter.h" #else #define lsx_to_3dB(x) ((x)/(x)) #endif soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags) { soxr_quality_spec_t spec, * p = &spec; unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */ unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q; double rej; memset(p, 0, sizeof(*p)); if (quality > SOXR_PRECISIONQ) { p->e = "invalid quality type"; return spec; } flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0; p->phase_response = "\62\31\144"[(recipe & 0x30)>>4]; p->stopband_begin = 1; p->precision = quality == SOXR_QQ ? 0 : quality <= SOXR_16_BITQ ? 16 : quality <= SOXR_32_BITQ ? 4 + quality * 4 : quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */ 0; rej = p->precision * linear_to_dB(2.); p->flags = flags; if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) { #define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */ p->passband_end = quality == 1? 
LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej); if (quality <= 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } else { /* TODO: move to soxr-lsr.c */ static float const bw[] = {.931f, .832f, .663f}; p->passband_end = bw[quality - SOXR_LSR0Q]; if (quality == SOXR_LSR2Q) { p->flags &= ~SOXR_ROLLOFF_NONE; p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ; } } if (recipe & SOXR_STEEP_FILTER) p->passband_end = 1 - .01 / lsx_to_3dB(rej); return spec; } char const * soxr_engine(soxr_t p) { return resampler_id(); } size_t * soxr_num_clips(soxr_t p) { return &p->clips; } soxr_error_t soxr_error(soxr_t p) { return p->error; } soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads) { soxr_runtime_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); p->log2_min_dft_size = 10; p->log2_large_dft_size = 17; p->coef_size_kbytes = 400; p->num_threads = num_threads; return spec; } soxr_io_spec_t soxr_io_spec( soxr_datatype_t itype, soxr_datatype_t otype) { soxr_io_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); if ((itype | otype) >= SOXR_SPLIT * 2) p->e = "invalid io datatype(s)"; else { p->itype = itype; p->otype = otype; p->scale = 1; } return spec; } #if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64) #if defined __GNUC__ && defined __x86_64__ #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm__ __volatile__ ( \ "cpuid \n\t" \ : "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \ : "a" (type), "c" (0)); #elif defined __GNUC__ && defined __i386__ #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm__ __volatile__ ( \ "mov %%ebx, %%edi \n\t" \ "cpuid \n\t" \ "xchg %%edi, %%ebx \n\t" \ : "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \ : "a" (type), "c" (0)); #elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500 void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue); #pragma intrinsic(__cpuidex) #define CPUID(type, eax_, ebx_, ecx_, edx_) do { \ int regs[4]; \ __cpuidex(regs, type, 0); \ eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], 
edx_ = regs[3]; \ } while(0) #elif defined _M_X64 && defined _MSC_VER void __cpuidex(int CPUInfo[4], int info_type); #pragma intrinsic(__cpuidex) #define CPUID(type, eax_, ebx_, ecx_, edx_) do { \ int regs[4]; \ __cpuidex(regs, type); \ eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \ } while(0) #elif defined _M_IX86 && defined _MSC_VER #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm pushad \ __asm mov eax, type \ __asm xor ecx, ecx \ __asm cpuid \ __asm mov eax_, eax \ __asm mov ebx_, ebx \ __asm mov ecx_, ecx \ __asm mov edx_, edx \ __asm popad #endif #endif #if WITH_CR32S && WITH_CR32 static bool cpu_has_simd32(void) { #if defined __x86_64__ || defined _M_X64 return true; #elif defined __i386__ || defined _M_IX86 enum {SSE = 1 << 25, SSE2 = 1 << 26}; unsigned eax_, ebx_, ecx_, edx_; CPUID(1, eax_, ebx_, ecx_, edx_); return (edx_ & (SSE|SSE2)) != 0; #elif defined AV_CPU_FLAG_NEON return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON); #else return false; #endif } static bool should_use_simd32(void) { char const * e; return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) : ((e = getenv("SOXR_USE_SIMD32")))? 
!!atoi(e) : cpu_has_simd32(); } #else #define should_use_simd32() true #endif #if WITH_CR64S && WITH_CR64 #if defined __GNUC__ #define XGETBV(type, eax_, edx_) \ __asm__ __volatile__ ( \ ".byte 0x0f, 0x01, 0xd0\n" \ : "=a"(eax_), "=d"(edx_) : "c" (type)); #elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219 #include <immintrin.h> #define XGETBV(type, eax_, edx_) do { \ union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \ eax_ = a.y[0], edx_ = a.y[1]; \ } while(0) #elif defined _M_IX86 && defined _MSC_VER #define XGETBV(type, eax_, edx_) \ __asm pushad \ __asm mov ecx, type \ __asm _emit 0x0f \ __asm _emit 0x01 \ __asm _emit 0xd0 \ __asm mov eax_, eax \ __asm mov edx_, edx \ __asm popad #else #define XGETBV(type, eax_, edx_) eax_ = edx_ = 0 #endif static bool cpu_has_simd64(void) { enum {OSXSAVE = 1 << 27, AVX = 1 << 28}; unsigned eax_, ebx_, ecx_, edx_; CPUID(1, eax_, ebx_, ecx_, edx_); if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) { XGETBV(0, eax_, edx_); return (eax_ & 6) == 6; } return false; } static bool should_use_simd64(void) { char const * e; return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) : ((e = getenv("SOXR_USE_SIMD64")))? 
!!atoi(e) : cpu_has_simd64(); } #else #define should_use_simd64() true #endif extern control_block_t _soxr_rate32_cb, _soxr_rate32s_cb, _soxr_rate64_cb, _soxr_rate64s_cb, _soxr_vr32_cb; static void runtime_num(char const * env_name, int min, int max, unsigned * field) { char const * e = getenv(env_name); if (e) { int i = atoi(e); if (i >= min && i <= max) *field = (unsigned)i; } } static void runtime_flag(char const * env_name, unsigned n_bits, unsigned n_shift, unsigned long * flags) { char const * e = getenv(env_name); if (e) { int i = atoi(e); unsigned long mask = (1UL << n_bits) - 1; if (i >= 0 && i <= (int)mask) *flags &= ~(mask << n_shift), *flags |= ((unsigned long)i << n_shift); } } soxr_t soxr_create( double input_rate, double output_rate, unsigned num_channels, soxr_error_t * error0, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { double io_ratio = output_rate!=0? input_rate!=0? input_rate / output_rate : -1 : input_rate!=0? -1 : 0; static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768}; soxr_t p = 0; soxr_error_t error = 0; #if WITH_DEV_TRACE #define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x)) char const * e = getenv("SOXR_TRACE"); _soxr_trace_level = e? atoi(e) : 0; { static char const arch[] = {_(char), _(short), _(int), _(long), _(long long) , ' ', _(float), _(double), _(long double) , ' ', _(int *), _(int (*)(int)) , ' ', HAVE_BIGENDIAN ? 'B' : 'L' #if defined _OPENMP , ' ', 'O', 'M', 'P' #endif , 0}; #undef _ lsx_debug("arch: %s", arch); } #endif if (q_spec && q_spec->e) error = q_spec->e; else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2) error = "invalid io datatype(s)"; if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed"; if (p) { control_block_t * control_block; p->q_spec = q_spec? 
*q_spec : soxr_quality_spec(SOXR_HQ, 0); if (q_spec) { /* Backwards compatibility with original API: */ if (p->q_spec.passband_end > 2) p->q_spec.passband_end /= 100; if (p->q_spec.stopband_begin > 2) p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100; } p->io_ratio = io_ratio; p->num_channels = num_channels; if (io_spec) p->io_spec = *io_spec; else p->io_spec.scale = 1; p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1); runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size); runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size); runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes); runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads); runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags); runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags); runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags); p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] / datatype_full_scale[p->io_spec.itype & 3]; p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; #if WITH_CR32 || WITH_CR32S || WITH_VR32 if (0 #if WITH_VR32 || ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR)) #endif #if WITH_CR32 || WITH_CR32S || !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION)) #endif ) { p->deinterleave = (deinterleave_t)_soxr_deinterleave_f; p->interleave = (interleave_t)_soxr_interleave_f; control_block = #if WITH_VR32 ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb : #endif #if WITH_CR32S !WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb : #endif &_soxr_rate32_cb; } #if WITH_CR64 || WITH_CR64S else #endif #endif #if WITH_CR64 || WITH_CR64S { p->deinterleave = (deinterleave_t)_soxr_deinterleave; p->interleave = (interleave_t)_soxr_interleave; control_block = #if WITH_CR64S !WITH_CR64 || should_use_simd64()? 
&_soxr_rate64s_cb : #endif &_soxr_rate64_cb; } #endif memcpy(&p->control_block, control_block, sizeof(p->control_block)); if (p->num_channels && io_ratio!=0) error = soxr_set_io_ratio(p, io_ratio, 0); } if (error) soxr_delete(p), p = 0; if (error0) *error0 = error; return p; } soxr_error_t soxr_set_input_fn(soxr_t p, soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen) { p->input_fn_state = input_fn_state; p->input_fn = input_fn; p->max_ilen = max_ilen? max_ilen : (size_t)-1; return 0; } static void soxr_delete0(soxr_t p) { unsigned i; if (p->resamplers) for (i = 0; i < p->num_channels; ++i) { if (p->resamplers[i]) resampler_close(p->resamplers[i]); free(p->resamplers[i]); } free(p->resamplers); free(p->channel_ptrs); free(p->shared); memset(p, 0, sizeof(*p)); } double soxr_delay(soxr_t p) { return (p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0; } static soxr_error_t fatal_error(soxr_t p, soxr_error_t error) { soxr_delete0(p); return p->error = error; } static soxr_error_t initialise(soxr_t p) { unsigned i; size_t shared_size, channel_size; resampler_sizes(&shared_size, &channel_size); p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels); p->shared = calloc(shared_size, 1); p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels); if (!p->shared || !p->channel_ptrs || !p->resamplers) return fatal_error(p, "malloc failed"); for (i = 0; i < p->num_channels; ++i) { soxr_error_t error; if (!(p->resamplers[i] = calloc(channel_size, 1))) return fatal_error(p, "malloc failed"); error = resampler_create( p->resamplers[i], p->shared, p->io_ratio, &p->q_spec, &p->runtime_spec, p->io_spec.scale); if (error) return fatal_error(p, error); } return 0; } soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels) { if (!p) return "invalid soxr_t pointer"; if (num_channels == p->num_channels) return p->error; if (!num_channels) return "invalid # of channels"; if (p->resamplers) return "# of channels can't be 
changed"; p->num_channels = num_channels; return soxr_set_io_ratio(p, p->io_ratio, 0); } soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len) { unsigned i; soxr_error_t error; if (!p) return "invalid soxr_t pointer"; if ((error = p->error)) return error; if (!p->num_channels) return "must set # channels before O/I ratio"; if (io_ratio <= 0) return "I/O ratio out-of-range"; if (!p->channel_ptrs) { p->io_ratio = io_ratio; return initialise(p); } if (p->control_block[8]) { for (i = 0; !error && i < p->num_channels; ++i) resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len); return error; } return fabs(p->io_ratio - io_ratio) < 1e-15? 0 : "varying O/I ratio is not supported with this quality level"; } void soxr_delete(soxr_t p) { if (p) soxr_delete0(p), free(p); } soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */ { if (p) { struct soxr tmp = *p; soxr_delete0(p); memset(p, 0, sizeof(*p)); p->input_fn = tmp.input_fn; p->runtime_spec = tmp.runtime_spec; p->q_spec = tmp.q_spec; p->io_spec = tmp.io_spec; p->num_channels = tmp.num_channels; p->input_fn_state = tmp.input_fn_state; memcpy(p->control_block, tmp.control_block, sizeof(p->control_block)); p->deinterleave = tmp.deinterleave; p->interleave = tmp.interleave; return (p->q_spec.flags & RESET_ON_CLEAR)? 
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0; } return "invalid soxr_t pointer"; } static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len) { sample_t * dest = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1); } static size_t soxr_input(soxr_t p, void const * in, size_t len) { bool separated = !!(p->io_spec.itype & SOXR_SPLIT); unsigned i; if (!p || p->error) return 0; if (!in && len) {p->error = "null input buffer pointer"; return 0;} if (!len) { p->flushing = true; return 0; } if (separated) for (i = 0; i < p->num_channels; ++i) soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len); else { for (i = 0; i < p->num_channels; ++i) p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)( (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels); } return len; } static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated) { sample_t const * src; if (p->flushing) resampler_flush(p->resamplers[i]); resampler_process(p->resamplers[i], len); src = resampler_output(p->resamplers[i], NULL, &len); if (separated) p->clips += (p->interleave)(p->io_spec.otype, &dest, &src, len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 
0 : &p->seed); else p->channel_ptrs[i] = (void /* const */ *)src; return len; } static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len) { unsigned u; size_t done = 0; bool separated = !!(p->io_spec.otype & SOXR_SPLIT); #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done1; done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated); if (!i) done = done1; } else #endif for (u = 0; u < p->num_channels; ++u) done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated); if (!separated) p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs, done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); return done; } size_t soxr_output(soxr_t p, void * out, size_t len0) { size_t odone, odone0 = 0, olen = len0, osize, idone; size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio)); void const * in = out; /* Set to !=0, so that caller may leave unset. 
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], 
olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_VHQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
expected_output.c
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <polybench.h> #include "gemver.h" /** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /*gemver.c: this file is part of PolyBench/C*/ /*Include polybench common header.*/ /*Include benchmark-specific header.*/ /*Array initialization.*/ static void init_array(int n, double *alpha, double *beta, double A[2000][2000], double u1[2000], double v1[2000], double u2[2000], double v2[2000], double w[2000], double x[2000], double y[2000], double z[2000]) { int i, j; *alpha = 1.5; *beta = 1.2; double fn = (double) n; for(i = 0; i < n; i++) { u1[i] = i; u2[i] = ((i + 1) / fn) / 2.0; v1[i] = ((i + 1) / fn) / 4.0; v2[i] = ((i + 1) / fn) / 6.0; y[i] = ((i + 1) / fn) / 8.0; z[i] = ((i + 1) / fn) / 9.0; x[i] = 0.0; w[i] = 0.0; for(j = 0; j < n; j++) A[i][j] = (double) (i * j % n) / n; } } /*DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output.*/ static void print_array(int n, double w[2000]) { int i; fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "w"); for(i = 0; i < n; i++) { if(i % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2lf ", w[i]); } fprintf(stderr, "\nend dump: %s\n", "w"); fprintf(stderr, "==END DUMP_ARRAYS==\n"); } /*Main computational kernel. 
The whole function will be timed, including the call and return.*/ static void kernel_gemver(int n, double alpha, double beta, double A[2000][2000], double u1[2000], double v1[2000], double u2[2000], double v2[2000], double w[2000], double x[2000], double y[2000], double z[2000]) { int i, j; #pragma omp parallel for default(shared) private(i, j) firstprivate(n, u1, v1, u2, v2) for(i = 0; i < n; i++) { // #pragma omp parallel for default(shared) private(j) firstprivate(n, i, u1, v1, u2, v2) for(j = 0; j < n; j++) A[i][j] = A[i][j] + u1[i] * v1[j] + u2[i] * v2[j]; } #pragma omp parallel for default(shared) private(i, j) firstprivate(n, beta, A, y) for(i = 0; i < n; i++) { // #pragma omp parallel for default(shared) private(j) firstprivate(n, beta, i, A, y) reduction(+ : x[i]) for(j = 0; j < n; j++) x[i] = x[i] + beta * A[j][i] * y[j]; } #pragma omp parallel for default(shared) private(i) firstprivate(n, z) for(i = 0; i < n; i++) x[i] = x[i] + z[i]; #pragma omp parallel for default(shared) private(i, j) firstprivate(n, alpha, A, x) for(i = 0; i < n; i++) { // #pragma omp parallel for default(shared) private(j) firstprivate(n, alpha, i, A, x) reduction(+ : w[i]) for(j = 0; j < n; j++) w[i] = w[i] + alpha * A[i][j] * x[j]; } } int main(int argc, char **argv) { /*Retrieve problem size.*/ int n = 2000; /*Variable declaration/allocation.*/ double alpha; double beta; double (*A)[2000][2000]; A = (double (*)[2000][2000]) polybench_alloc_data((2000 + 0) * (2000 + 0), sizeof(double)); ; double (*u1)[2000]; u1 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*v1)[2000]; v1 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*u2)[2000]; u2 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*v2)[2000]; v2 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*w)[2000]; w = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*x)[2000]; x = (double 
(*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*y)[2000]; y = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; double (*z)[2000]; z = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double)); ; /*Initialize array(s).*/ init_array(n, &alpha, &beta, *A, *u1, *v1, *u2, *v2, *w, *x, *y, *z); /*Start timer.*/ ; /*Run kernel.*/ kernel_gemver(n, alpha, beta, *A, *u1, *v1, *u2, *v2, *w, *x, *y, *z); /*Stop and print timer.*/ ; ; /*Prevent dead-code elimination. All live-out data must be printed by the function call in argument.*/ if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *w); /*Be clean.*/ free((void *) A); ; free((void *) u1); ; free((void *) v1); ; free((void *) u2); ; free((void *) v2); ; free((void *) w); ; free((void *) x); ; free((void *) y); ; free((void *) z); ; return 0; }
interpolation_p1.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Interpolate (prolong) one block from the coarse level (level_c, vector id_c) to the fine level (level_f, vector id_f)
// with piecewise-linear (P1) weights.  Each coarse cell maps to 2x2x2 fine cells (dims are doubled, reads use i>>1 etc.).
// The fine destination is updated as  write = prescale_f*write + P(read), so prescale_f==0.0 overwrites (used for packing
// MPI buffers) while prescale_f==1.0 accumulates.
static inline void interpolation_p1_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
  int write_dim_i   = block->dim.i<<1; // calculate the dimensions of the resultant fine block
  int write_dim_j   = block->dim.j<<1;
  int write_dim_k   = block->dim.k<<1;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  const double * __restrict__  read = block->read.ptr;
        double * __restrict__ write = block->write.ptr;

  // A box index >= 0 means that endpoint is a box resident on this rank (rather than a packed MPI buffer):
  // fetch the box's strides and point into its interior, past the ghost-zone halo.
  if(block->read.box >=0){
     read_jStride = level_c->my_boxes[block->read.box ].jStride;
     read_kStride = level_c->my_boxes[block->read.box ].kStride;
     read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
  }

  int i,j,k;
  // delta_* selects the nearest coarse neighbor in each dimension (sign flips with fine-point parity).
  for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
  for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
  for(i=0;i<write_dim_i;i++){int delta_i=           -1;if(i&0x1)delta_i=           1; // i.e. even points look backwards while odd points look forward
    int write_ijk = ((i   )+write_i) + (((j   )+write_j)*write_jStride) + (((k   )+write_k)*write_kStride);
    int  read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    //  | o   |   o |
    //  +---+---+---+---+
    //  |   | x | x |   |
    //
    // CAREFUL !!!  you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
    // piecewise linear interpolation... NOTE, BC's must have been previously applied
    // Weights are tensor products of the 1D P1 weights {0.75, 0.25}:
    //   0.421875 = 0.75^3, 0.140625 = 0.75^2*0.25, 0.046875 = 0.75*0.25^2, 0.015625 = 0.25^3.
    write[write_ijk] = prescale_f*write[write_ijk] +
                       0.421875*read[read_ijk                        ] +
                       0.140625*read[read_ijk                +delta_k] +
                       0.140625*read[read_ijk        +delta_j        ] +
                       0.046875*read[read_ijk        +delta_j+delta_k] +
                       0.140625*read[read_ijk+delta_i                ] +
                       0.046875*read[read_ijk+delta_i        +delta_k] +
                       0.046875*read[read_ijk+delta_i+delta_j        ] +
                       0.015625*read[read_ijk+delta_i+delta_j+delta_k];
  }}}
}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Drives the full prolongation: exchange ghost zones and apply BCs on the coarse level, prepost Irecvs, pack and Isend the
// coarse-side blocks, perform rank-local interpolation while messages are in flight, then wait and unpack.  Phase timings
// are accumulated on level_f's timers.
void interpolation_p1(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
       apply_BCs_p1(level_c,id_c,STENCIL_SHAPE_BOX);

  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;
  int buffer=0;
  int n;
  int my_tag = (level_f->tag<<4) | 0x7;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level_f->interpolation.num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_f->interpolation.num_recvs;n++){
      MPI_Irecv(level_f->interpolation.recv_buffers[n],
                level_f->interpolation.recv_sizes[n],
                MPI_DOUBLE,
                level_f->interpolation.recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers...  (blocks[0] = coarse-side blocks destined for other ranks)
  if(level_c->interpolation.num_blocks[0]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
      // !!! prescale==0 because you don't want to increment the MPI buffer
      interpolation_p1_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level_c->interpolation.num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_c->interpolation.num_sends;n++){
      MPI_Isend(level_c->interpolation.send_buffers[n],
                level_c->interpolation.send_sizes[n],
                MPI_DOUBLE,
                level_c->interpolation.send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_send += (_timeEnd-_timeStart);
  }
  #endif

  // perform local interpolation... try and hide within Isend latency...  (blocks[1] = rank-local blocks)
  if(level_c->interpolation.num_blocks[1]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
      interpolation_p1_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages>0){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
    _timeEnd = getTime();
    level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers  (blocks[2] = fine-side blocks arriving from other ranks)
  if(level_f->interpolation.num_blocks[2]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
    for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
      IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
jacobi-omp5.c
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file jacobi.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief This code solves the steady state heat equation on a rectangular region. * This code solves the steady state heat equation on a rectangular region. * The sequential version of this program needs approximately * 18/epsilon iterations to complete. * The physical region, and the boundary conditions, are suggested * by this diagram; * W = 0 * +------------------+ * | | * W = 100 | | W = 100 * | | * +------------------+ * W = 100 * The region is covered with a grid of M by N nodes, and an N by N * array W is used to record the temperature. 
The correspondence between * array indices and locations in the region is suggested by giving the * indices of the four corners: * I = 0 * [0][0]-------------[0][N-1] * | | * J = 0 | | J = N-1 * | | * [M-1][0]-----------[M-1][N-1] * I = M-1 * The steady state solution to the discrete heat equation satisfies the * following condition at an interior grid point: * W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] ) * where "Central" is the index of the grid point, "North" is the index * of its immediate neighbor to the "north", and so on. * * Given an approximate solution of the steady state heat equation, a * "better" solution is given by replacing each interior point by the * average of its 4 neighbors - in other words, by using the condition * as an ASSIGNMENT statement: * W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] ) * If this process is repeated often enough, the difference between successive * estimates of the solution will go to zero. * This program carries out such an iteration, using a tolerance specified by * the user, and writes the final estimate of the solution to a file that can * be used for graphic processing. * icensing: * This code is distributed under the GNU LGPL license. * odified: * 18 October 2011 * uthor: * Original C version by Michael Quinn. * This C version by John Burkardt. * eference: * Michael Quinn, * Parallel Programming in C with MPI and OpenMP, * McGraw-Hill, 2004, * ISBN13: 978-0071232654, * LC: QA76.73.C15.Q55. * ocal parameters: * Local, double DIFF, the norm of the change in the solution from one iteration * to the next. * Local, double MEAN, the average of the boundary values, used to initialize * the values of the solution in the interior. * Local, double U[M][N], the solution at the previous iteration. * Local, double W[M][N], the solution computed at the latest iteration. 
* * * @see https://en.wikipedia.org/wiki/Jacobi_method * @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "utils.h" static int N; static int MAX_ITERATIONS; static int SEED; static double CONVERGENCE_THRESHOLD; static FILE *data; #define SEPARATOR "------------------------------------\n" // Return the current time in seconds since the Epoch double get_timestamp(); // Parse command line arguments to set solver parameters void parse_arguments(int argc, char *argv[]); // Run the Jacobi solver // Returns the number of iterations performed int run(double *restrict A, double *restrict xtmp) { int iter = 0, iterations_print = 1; double err = 0.0; #pragma omp target enter data map(to \ : A [0:N * N]) map(alloc \ : xtmp [0:N * N]) do { err = 0.0; #pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU) map(tofrom \ : err) #pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU) reduction(max \ : err) for (int i = 1; i < N - 1; i++) { for (int j = 1; j < N - 1; j++) { xtmp[i * N + j] = 0.25 * (A[(i - 1) * N + j] + A[(i + 1) * N + j] + A[i * N + j - 1] + A[i * N + j + 1]); err = fmax(err, fabs(xtmp[i * N + j] - A[i * N + j])); } } //#pragma omp target update from(xtmp[0:N*N]) #pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU) #pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU) for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { A[i * N + j] = xtmp[i * N + j]; } } iter++; #ifdef DEBUG if (iter == iterations_print) { printf(" %8d %f\n", iter, err); iterations_print = 2 * iterations_print; } #endif } while (err > CONVERGENCE_THRESHOLD && iter < MAX_ITERATIONS); #pragma omp target exit data map(from \ : A [0:N * N]) map(release \ : xtmp) return iter; } int main(int argc, char 
*argv[]) { parse_arguments(argc, argv); double *A = malloc(N * N * sizeof(double)); double *xtmp = malloc(N * N * sizeof(double)); printf(SEPARATOR); printf("Matrix size: %dx%d\n", N, N); printf("Maximum iterations: %d\n", MAX_ITERATIONS); printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD); printf(SEPARATOR); for (int ii = 0; ii < N; ii++) { for (int jj = 0; jj < N; jj++) { double f; fread(&f, sizeof(double), 1, data); A[ii * N + jj] = f; } } // Run Jacobi solver start_timer(); int itr = run(A, xtmp); stop_timer(); printf("Iterations = %d\n", itr); printf("Solver runtime = %lf ms\n", elapsed_ns() / 1E6); if (itr == MAX_ITERATIONS) printf("WARNING: solution did not converge\n"); printf(SEPARATOR); free(A); free(xtmp); fclose(data); return 0; } int parse_int(const char *str) { char *next; int value = strtoul(str, &next, 10); return strlen(next) ? -1 : value; } double parse_double(const char *str) { char *next; double value = strtod(str, &next); return strlen(next) ? -1 : value; } void parse_arguments(int argc, char *argv[]) { // Set default values N = 500; MAX_ITERATIONS = 2000; CONVERGENCE_THRESHOLD = 0.001; SEED = 0; for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c")) { if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0) { printf("Invalid convergence threshold\n"); exit(1); } } else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i")) { if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0) { printf("Invalid number of iterations\n"); exit(1); } } else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n")) { if (++i >= argc || (N = parse_int(argv[i])) < 0) { printf("Invalid matrix order\n"); exit(1); } } else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { printf("\n"); printf("Usage: ./jacobi [OPTIONS]\n\n"); printf("Options:\n"); printf(" -h --help Print this message\n"); printf(" -c --convergence C Set convergence threshold\n"); printf(" -i --iterations 
I Set maximum number of iterations\n"); printf(" -n --norder N Set maxtrix order (500 or 1000)\n"); printf("\n"); exit(0); } else { printf("Unrecognized argument '%s' (try '--help')\n", argv[i]); exit(1); } } if (N == 1000) data = fopen("data/jacobi-1000.bin", "rb"); else if (N == 500) data = fopen("data/jacobi-500.bin", "rb"); else { printf("Invalid matrix order\n"); exit(1); } }
lrthresh.c
/* Copyright 2015. The Regents of the University of California.
 * Copyright 2015. Tao Zhang and Joseph Cheng.
 * Copyright 2016-2018. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2014-2015 Frank Ong <frankong@berkeley.edu>
 * 2014 Tao Zhang
 * 2014 Joseph Cheng
 * 2014 Jon Tamir
 * 2014-2018 Martin Uecker
 */

#include <stdlib.h>
#include <complex.h>
#include <math.h>

#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/debug.h"

#include "num/multind.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/ops.h"
#include "num/blockproc.h"
#include "num/casorati.h"

#include "iter/thresh.h"

#include "lowrank/batchsvd.h"
#include "lowrank/svthresh.h"

#include "lrthresh.h"


/* State shared by the low-rank thresholding operator's callbacks. */
struct lrthresh_data_s {

	INTERFACE(operator_data_t);

	float lambda;			// base regularization weight (scaled by mu in lrthresh_apply)
	bool randshift;			// randomly shift block boundaries on every application
	bool noise;			// treat the last level as a noise level (one full-size block)
	int remove_mean;		// NOTE(review): stored but not referenced in this file — confirm use elsewhere

	long strs_lev[DIMS];		// strides of dims_decom (CFL_SIZE units)
	long strs[DIMS];		// strides of dims (CFL_SIZE units)
	long dims_decom[DIMS];		// dimensions including the LEVEL_DIM axis
	long dims[DIMS];		// image dimensions (LEVEL_DIM removed)

	unsigned long mflags;		// dims folded into the matrix row dimension M
	unsigned long flags;		// NOTE(review): not referenced in this file
	long levels;			// number of decomposition levels (= dims_decom[LEVEL_DIM])
	long blkdims[MAX_LEV][DIMS];	// per-level block dimensions

	bool overlapping_blocks;	// overlapping (casorati) vs. disjoint (basorati) blocks
};

static DEF_TYPEID(lrthresh_data_s);

static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks);
static void lrthresh_free_data(const operator_data_t* data);
static void lrthresh_apply(const operator_data_t* _data, float lambda, complex float* dst, const complex float* src);



/**
 * Initialize lrthresh operator
 *
 * @param dims_lev - decomposition dimensions
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions gets reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 *
 */
const struct operator_p_s* lrthresh_create(const long dims_lev[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks)
{
	struct lrthresh_data_s* data = lrthresh_create_data(dims_lev, randshift, mflags, blkdims, lambda, noise, remove_mean, overlapping_blocks);

	return operator_p_create(DIMS, dims_lev, DIMS, dims_lev, CAST_UP(data), lrthresh_apply, lrthresh_free_data);
}



/**
 * Initialize lrthresh data
 *
 * @param dims_decom - dimensions with levels at LEVEL_DIMS
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions gets reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 *
 */
static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean, bool overlapping_blocks)
{
	PTR_ALLOC(struct lrthresh_data_s, data);
	SET_TYPEID(lrthresh_data_s, data);

	data->randshift = randshift;
	data->mflags = mflags;
	data->lambda = lambda;
	data->noise = noise;
	data->remove_mean = remove_mean;
	data->overlapping_blocks = overlapping_blocks;

	// level dimensions
	md_copy_dims(DIMS, data->dims_decom, dims_decom);
	md_calc_strides(DIMS, data->strs_lev, dims_decom, CFL_SIZE);

	// image dimensions (strip the level axis)
	data->levels = dims_decom[LEVEL_DIM];
	md_select_dims(DIMS, ~LEVEL_FLAG, data->dims, dims_decom);
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);

	// blkdims
	for(long l = 0; l < data->levels; l++) {

		for (long i = 0; i < DIMS; i++)
			data->blkdims[l][i] = blkdims[l][i];
	}

	return PTR_PASS(data);
}



/**
 * Free lrthresh operator
 */
static void lrthresh_free_data(const operator_data_t* _data)
{
	xfree(CAST_DOWN(lrthresh_data_s, _data));
}



/*
 * Return a random number between 0 and limit inclusive.
 * (rejection sampling, which avoids modulo bias)
 * NOTE(review): divisor becomes 0 if limit + 1 > RAND_MAX — callers here pass
 * block-size-derived limits, presumably far below RAND_MAX; confirm.
 */
static int rand_lim(int limit)
{
	int divisor = RAND_MAX / (limit + 1);
	int retval;

	do {
		retval = rand() / divisor;

	} while (retval > limit);

	return retval;
}



/*
 * Low rank threhsolding for arbitrary block sizes
 * For each level: zero-pad to a multiple of the block size, (optionally)
 * randomly shift, reshape blocks into an M x N x num_blocks stack, apply
 * batched singular-value thresholding, and undo the reshape/shift/pad.
 */
static void lrthresh_apply(const operator_data_t* _data, float mu, complex float* dst, const complex float* src)
{
	auto data = CAST_DOWN(lrthresh_data_s, _data);

	// effective threshold for this call
	float lambda = mu * data->lambda;

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

//#pragma omp parallel for
	for (int l = 0; l < data->levels; l++) {

		// per-level slices along LEVEL_DIM
		complex float* dstl = dst + l * strs1[LEVEL_DIM];
		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long shifts[DIMS];
		long unshifts[DIMS];
		long zpad_dims[DIMS];
		long M = 1;

		// round each dimension up to a multiple of the block size; draw shifts
		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			zpad_dims[i] = (data->dims[i] + blkdims[i] - 1) / blkdims[i];
			zpad_dims[i] *= blkdims[i];

			if (MD_IS_SET(data->mflags, i))
				M *= blkdims[i];

			if (data->randshift)
				shifts[i] = rand_lim(MIN(blkdims[i] - 1, zpad_dims[i] - blkdims[i]));
			else
				shifts[i] = 0;

			unshifts[i] = -shifts[i];
		}

		long zpad_strs[DIMS];
		md_calc_strides(DIMS, zpad_strs, zpad_dims, CFL_SIZE);

		long blk_size = md_calc_size(DIMS, blkdims);
		long img_size = md_calc_size(DIMS, zpad_dims);
		long N = blk_size / M;		// matrix columns per block
		long B = img_size / blk_size;	// number of (disjoint) blocks

		// the noise level is thresholded as one image-sized block
		if (data->noise && (l == data->levels - 1)) {

			M = img_size;
			N = 1;
			B = 1;
		}

		complex float* tmp = md_alloc_sameplace(DIMS, zpad_dims, CFL_SIZE, dst);

		// zero-pad (circular extension), then apply the random shift
		md_circ_ext(DIMS, zpad_dims, tmp, data->dims, srcl, CFL_SIZE);

		md_circ_shift(DIMS, zpad_dims, shifts, tmp, tmp, CFL_SIZE);

		long mat_dims[2];
		(data->overlapping_blocks ? casorati_dims : basorati_dims)(DIMS, mat_dims, blkdims, zpad_dims);

		complex float* tmp_mat = md_alloc_sameplace(2, mat_dims, CFL_SIZE, dst);
		complex float* tmp_mat2 = tmp_mat;

		// Reshape image into a blk_size x number of blocks matrix
		(data->overlapping_blocks ? casorati_matrix : basorati_matrix)(DIMS, blkdims, mat_dims, tmp_mat, zpad_dims, zpad_strs, tmp);

		long num_blocks = mat_dims[1];
		long mat2_dims[2] = { mat_dims[0], mat_dims[1] };

		// FIXME: casorati and basorati are transposes of each other
		if (data->overlapping_blocks) {

			mat2_dims[0] = mat_dims[1];
			mat2_dims[1] = mat_dims[0];
			tmp_mat2 = md_alloc_sameplace(2, mat2_dims, CFL_SIZE, dst);

			md_transpose(2, 0, 1, mat2_dims, tmp_mat2, mat_dims, tmp_mat, CFL_SIZE);
			num_blocks = mat2_dims[1];

			if (B > 1)
				B = mat2_dims[1];
		}

		// NOTE(review): the arguments are long but printed with %d — should use %ld.
		debug_printf(DP_DEBUG4, "M=%d, N=%d, B=%d, num_blocks=%d, img_size=%d, blk_size=%d\n", M, N, B, num_blocks, img_size, blk_size);

		// singular-value soft-thresholding of every block, in place
		batch_svthresh(M, N, num_blocks, lambda * GWIDTH(M, N, B), *(complex float (*)[mat2_dims[1]][M][N])tmp_mat2);

		//	for ( int b = 0; b < mat_dims[1]; b++ )
		//		svthresh(M, N, lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		if (data->overlapping_blocks) {

			md_transpose(2, 0, 1, mat_dims, tmp_mat, mat2_dims, tmp_mat2, CFL_SIZE);
		}

		// reshape back into image layout
		(data->overlapping_blocks ? casorati_matrixH : basorati_matrixH)(DIMS, blkdims, zpad_dims, zpad_strs, tmp, mat_dims, tmp_mat);

		if (data->overlapping_blocks) {

			// overlapping blocks each contributed once; average by 1/M
			md_zsmul(DIMS, zpad_dims, tmp, tmp, 1. / M);
			md_free(tmp_mat2);
		}

		// undo the shift and the zero-padding
		md_circ_shift(DIMS, zpad_dims, unshifts, tmp, tmp, CFL_SIZE);

		md_resize(DIMS, data->dims, dstl, zpad_dims, tmp, CFL_SIZE);

		md_free(tmp);
		md_free(tmp_mat);
	}
}



/*
 * Nuclear norm calculation for arbitrary block sizes
 */
float lrnucnorm(const struct operator_p_s* op, const complex float* src)
{
	struct lrthresh_data_s* data = (struct lrthresh_data_s*)operator_p_get_data(op);

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

	float nnorm = 0.;

	for (int l = 0; l < data->levels; l++) {

		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long blksize = 1;

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			blksize *= blkdims[i];
		}

		// 1x...x1 blocks: the nuclear norm degenerates to (twice) the l1-norm
		if (1 == blksize) {

			for (long j = 0; j < md_calc_size(DIMS, data->dims); j++)
				nnorm += 2 * cabsf(srcl[j]);

			continue;
		}

		struct svthresh_blockproc_data* svdata = svthresh_blockproc_create(data->mflags, 0., 0);

		complex float* tmp = md_alloc_sameplace(DIMS, data->dims, CFL_SIZE, src);

		//debug_print_dims(DP_DEBUG1, DIMS, data->dims);

		md_copy(DIMS, data->dims, tmp, srcl, CFL_SIZE);

		// Block SVD Threshold
		// NOTE(review): plain assignment discards contributions accumulated from
		// earlier levels (the blksize==1 branch above uses +=) — confirm whether
		// nnorm += ... was intended here.
		nnorm = blockproc(DIMS, data->dims, blkdims, (void*)svdata, nucnorm_blockproc, tmp, tmp);

		xfree(svdata);
		md_free(tmp);
	}

	return nnorm;
}



/*************
 * Block dimensions functions
 *************/


/**
 * Generates multiscale low rank block sizes
 *
 * @param blkdims - block sizes to be written
 * @param flags  - specifies which dimensions to do the blocks. The other dimensions will be the same as input
 * @param idims - input dimensions
 * @param blkskip - scale each level by blkskip to generate the next level
 *
 * returns number of levels
 */
long multilr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], int blkskip, long initblk)
{
	// Multiscale low rank block sizes
	long tmp_block[DIMS];

	for (unsigned int i = 0; i < DIMS; i++) {

		if (MD_IS_SET(flags, i))
			tmp_block[i] = MIN(initblk, idims[i]);
		else
			tmp_block[i] = idims[i];
	}

	bool done;

	// Loop block_sizes: grow by blkskip each level until every flagged
	// dimension has reached the full image size.
	long levels = 0;

	do {
		levels++;

		debug_printf(DP_INFO, "[\t");

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[levels - 1][i] = tmp_block[i];
			debug_printf(DP_INFO, "%ld\t", blkdims[levels-1][i]);
		}

		debug_printf(DP_INFO, "]\n");

		done = true;

		for (unsigned int i = 0; i < DIMS; i++) {

			if (MD_IS_SET(flags, i) && (idims[i] != 1)) {

				tmp_block[i] = MIN(tmp_block[i] * blkskip, idims[i]);
				done = done && (blkdims[levels - 1][i] == idims[i]);
			}
		}

	} while(!done);

	return levels;
}


/* Append one extra level whose block spans the whole image (the "noise" level). */
void add_lrnoiseblk(long* levels, long blkdims[MAX_LEV][DIMS], const long idims[DIMS])
{
	levels[0]++;

	debug_printf(DP_DEBUG1, "[\t");

	for (unsigned int i = 0; i < DIMS; i++) {

		blkdims[levels[0] - 1][i] = idims[i];
		debug_printf(DP_DEBUG1, "%ld\t", blkdims[levels[0] - 1][i]);
	}

	debug_printf(DP_DEBUG1, "]\n");
}


/**
 * Generates locally low rank block sizes
 *
 * @param blkdims - block sizes to be written
 * @param flags  - specifies which dimensions to do the blocks. The other dimensions will be the same as input
 * @param idims - input dimensions
 * @param llkblk - the block size
 *
 * returns number of levels = 1
 */
long llr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], long llrblk)
{
	for (unsigned int i = 0; i < DIMS; i++) {

		if (MD_IS_SET(flags, i))
			blkdims[0][i] = MIN(llrblk, idims[i]);
		else
			blkdims[0][i] = idims[i];
	}

	return 1;
}


/**
 * Generates low rank + sparse block sizes
 *
 * @param blkdims - block sizes to be written
 * @param idims - input dimensions
 *
 * returns number of levels = 2
 */
long ls_blkdims(long blkdims[MAX_LEV][DIMS], const long idims[DIMS])
{
	for (unsigned int i = 0; i < DIMS; i++) {

		blkdims[0][i] = 1;		// sparse level: 1x...x1 blocks
		blkdims[1][i] = idims[i];	// low-rank level: one full-size block
	}

	return 2;
}


/* Accessor for the operator's base regularization weight. */
float get_lrthresh_lambda(const struct operator_p_s* o)
{
	auto data = CAST_DOWN(lrthresh_data_s, operator_p_get_data(o));

	return data->lambda;
}
sieveOfErastotenes.c
/*
 * Adapted from: https://ideone.com/JU5CfV
 * --- Speedup of ~1.6
 *
 * Original timing notes:
 *   Sequential            real 0m4.055s  user 0m3.981s  sys 0m0.068s
 *   Parallel static 100   real 0m2.934s  user 0m9.502s  sys 0m0.088s
 *   Parallel dynamic 100  real 0m2.511s  user 0m9.518s  sys 0m0.092s
 *   Parallel guided 100   real 0m3.670s  user 0m7.022s  sys 0m0.088s
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>

/**
 * Count the primes in [2, n] with a parallel sieve of Eratosthenes.
 *
 * Creates a boolean array "prime[0..n]" initialized to true; prime[i]
 * ends up false iff i is composite.
 *
 * @param n upper bound (inclusive); any n < 2 yields 0
 * @return number of primes <= n, or -1 if the sieve buffer cannot be allocated
 */
int sieveOfEratosthenes(int n)
{
    if (n < 2)
        return 0; /* no primes below 2 (also avoids a degenerate allocation) */

    int primes = 0;
    bool *prime = malloc((n + 1) * sizeof *prime); /* no cast needed in C */

    /* BUG FIX: allocation was previously used unchecked. */
    if (prime == NULL)
        return -1;

    int sqrt_n = sqrt(n);
    memset(prime, true, (n + 1) * sizeof *prime);

    /* Cross out the multiples of every p <= sqrt(n).  Threads may redundantly
       clear the same entries, but every write stores `false`, so the final
       contents are unaffected. */
    #pragma omp parallel for schedule(static, 100)
    for (int p = 2; p <= sqrt_n; p++)
    {
        // If prime[p] is not changed, then it is a prime
        if (prime[p] == true)
        {
            // Update all multiples of p
            for (int i = p * 2; i <= n; i += p)
                prime[i] = false;
        }
    }

    // count prime numbers
    #pragma omp parallel for reduction(+ : primes)
    for (int p = 2; p <= n; p++)
        if (prime[p])
            primes++;

    free(prime); /* BUG FIX: the sieve buffer was previously leaked */

    return primes;
}

int main()
{
    int n = 100000000;
    printf("%d\n", sieveOfEratosthenes(n));
    return 0;
}
update_ops_reflect.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "constant.h" #include "update_ops.h" #include "stat_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif void reflection_gate(const CTYPE* reflection_state, CTYPE* state, ITYPE dim) { CTYPE coef = state_inner_product(reflection_state, state, dim); #ifdef _OPENMP #pragma omp parallel for #endif for (ITYPE state_index = 0; state_index < dim; ++state_index) { state[state_index] = 2.0 * coef * reflection_state[state_index] - state[state_index]; } }
GB_binop__isne_fc32.c
// NOTE (review): machine-generated kernel file (see banner below); any fix
// belongs in Generator/GB_binop.c and the code-generation scripts, not here.
// Operator instance: ISNE ("is not equal", z = GB_FC32_isne (x,y)) on
// single-precision complex operands; per the "C type" line below the result
// is stored as GxB_FC32_t, not bool.  For this operator the colscale (A*D),
// rowscale (D*A) and fused dense C+=A+B kernels are compiled out — their
// slots are "(none)" in the table below and the bodies sit under #if 0.
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__isne_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__isne_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc32) // C=scalar+B GB (_bind1st__isne_fc32) // C=scalar+B' GB (_bind1st_tran__isne_fc32) // C=A+scalar GB (_bind2nd__isne_fc32) // C=A'+scalar GB (_bind2nd_tran__isne_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern?
0 // BinaryOp: cij = GB_FC32_isne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC32 || GxB_NO_ISNE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE (review): disabled by the #if 0 above — ISNE is not in that op list,
// so the fused dense C += A+B kernel is not generated for this operator.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t
*restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isne_fc32) ( GrB_Matrix C,
const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads)
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_isne (x, aij) ; \ } GrB_Info GB (_bind1st_tran__isne_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_isne (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_offloading_map.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-aarch64-unknown-linux-gnu | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64-ibm-linux-gnu | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64le-ibm-linux-gnu | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-x86_64-pc-linux-gnu | %fcheck-x86_64-pc-linux-gnu -allow-empty // RUN: %libomptarget-compile-nvptx64-nvidia-cuda && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-nvptx64-nvidia-cuda | %fcheck-nvptx64-nvidia-cuda -allow-empty #include <assert.h> #include <stdio.h> int main(int argc, char *argv[]) { const int num_threads = 64, N = 128; int array[num_threads] = {0}; #pragma omp parallel for for (int i = 0; i < num_threads; ++i) { int tmp[N]; for (int j = 0; j < N; ++j) { tmp[j] = i; } #pragma omp target teams distribute parallel for map(tofrom : tmp) for (int j = 0; j < N; ++j) { tmp[j] += j; } for (int j = 0; j < N; ++j) { array[i] += tmp[j]; } } // Verify for (int i = 0; i < num_threads; ++i) { const int ref = (0 + N - 1) * N / 2 + i * N; assert(array[i] == ref); } printf("PASS\n"); return 0; } // CHECK: PASS
GB_binop__rminus_uint16.c
// NOTE (review): machine-generated kernel file (see banner below); any fix
// belongs in Generator/GB_binop.c and the code-generation scripts, not here.
// Operator instance: RMINUS ("reverse minus", z = y - x) on uint16_t;
// unsigned wrap-around on subtraction is well defined in C, so no overflow
// handling is needed.  RMINUS is in the set of ops allowed for the fused
// dense kernels, so AxD, DxB and _Cdense_ewise3_accum are all generated
// here (they are "(none)"/#if 0 for operators outside that set).
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint16) // A*D function (colscale): GB (_AxD__rminus_uint16) // D*A function (rowscale): GB (_DxB__rminus_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint16) // C=scalar+B GB (_bind1st__rminus_uint16) // C=scalar+B' GB (_bind1st_tran__rminus_uint16) // C=A+scalar GB (_bind2nd__rminus_uint16) // C=A'+scalar GB (_bind2nd_tran__rminus_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern?
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE (review): RMINUS is in the op list above, so this fused dense
// C += A+B kernel is enabled for this operator instance.
void GB (_Cdense_ewise3_accum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return
(GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB
(_AemultB_08__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads)
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ordering_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file ordering_op-inl.h * \brief Function definition of ordering operators */ #ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #include <mxnet/operator_util.h> #include <dmlc/optional.h> #include <mshadow/tensor.h> #include <algorithm> #include <vector> #include <string> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "./sort_op.h" #include "./indexing_op.h" #include "../../api/operator/op_utils.h" namespace mshadow { template<typename xpu, int src_dim, typename DType, int dst_dim> inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src, Shape<dst_dim> target_shape) { CHECK_EQ(src.CheckContiguous(), true); return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_); } }; namespace mxnet { namespace op { // These enums are only visible within this header namespace topk_enum { enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth}; } // topk_enum struct TopKParam : public dmlc::Parameter<TopKParam> { dmlc::optional<int> axis; int k; int ret_typ; bool is_ascend; int dtype; 
DMLC_DECLARE_PARAMETER(TopKParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose the top k indices." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(k).set_default(1) .describe("Number of top elements to select," " should be always smaller than or equal to the element number in the given axis." " A global sort is performed if set k < 1."); DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices) .add_enum("value", topk_enum::kReturnValue) .add_enum("indices", topk_enum::kReturnIndices) .add_enum("mask", topk_enum::kReturnMask) .add_enum("both", topk_enum::kReturnBoth) .describe("The return type.\n" " \"value\" means to return the top k values," " \"indices\" means to return the indices of the top k values," " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values." " \"both\" means to return a list of both values and indices of top k elements."); DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." " Top K largest elements will be chosen if set to false."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". " "An error will be raised if the selected data type cannot precisely represent the " "indices."); } }; struct SortParam : public dmlc::Parameter<SortParam> { dmlc::optional<int> axis; bool is_ascend; DMLC_DECLARE_PARAMETER(SortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose sort the input tensor." " If not given, the flattened array is used. 
Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s; axis_s << axis; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); } }; struct ArgSortParam : public dmlc::Parameter<ArgSortParam> { dmlc::optional<int> axis; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" " \"both\". 
An error will be raised if the selected data type cannot precisely " "represent the indices."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s, dtype_s; axis_s << axis; is_ascend_s << is_ascend; dtype_s << dtype; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape, size_t *batch_size, index_t *element_num, int *axis, index_t *k, bool *do_transpose, bool *is_ascend) { *do_transpose = false; *k = param.k; *is_ascend = param.is_ascend; // get batch_size, axis and element_num if (!static_cast<bool>(param.axis)) { // No axis given *axis = 0; *batch_size = 1; *element_num = src_shape.Size(); } else { *axis = param.axis.value(); if (*axis < 0) { *axis += src_shape.ndim(); } CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim())) << "Invalid axis! axis should be between 0 and " << src_shape.ndim() << ", found axis=" << *axis; if (src_shape[*axis] != 0) { *batch_size = src_shape.Size() / src_shape[*axis]; } *element_num = src_shape[*axis]; if (*axis != src_shape.ndim() - 1) { *do_transpose = true; } } // get k if (param.k <= 0) { *k = *element_num; } // get target_shape if (!static_cast<bool>(param.axis)) { if (param.ret_typ != topk_enum::kReturnMask) { *target_shape = mshadow::Shape1(*k); } else { *target_shape = src_shape; } } else { *target_shape = src_shape; if (param.ret_typ != topk_enum::kReturnMask) { (*target_shape)[*axis] = *k; } } CHECK(*k >= 0 && *k <= *element_num) << "k must be smaller than " << *element_num << ", get k = " << *k; } using namespace mshadow; struct fill_ind_to_one { template<typename DType> MSHADOW_XINLINE static void Map(int i, const index_t* indices, DType* out) { out[indices[i]] = static_cast<DType>(1); } }; struct fill_ind { template<typename DType> MSHADOW_XINLINE static void Map(int i, const 
index_t* indices, const DType* val, int req, DType* out) {
    KERNEL_ASSIGN(out[indices[i]], req, val[i]);
  }
};

// CPU batched (partial) sort: sorts each of the M batches of `dat`/`ind`
// independently, in parallel via OpenMP. `work` holds the flattened source
// values; `ind` arrives as a 0..N-1 iota per batch and leaves holding the
// sorted ordering.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
                                   const Tensor<cpu, 1, index_t>& ind,
                                   const Tensor<cpu, 1, char>& work,
                                   index_t K, index_t N, bool is_ascend,
                                   Stream<cpu> *s) {
  // Use full sort when K is relatively large.
  const bool full_sort(K*8 > N);
  // Batch size.
  const index_t M(work.size(0)/(sizeof(DType)*N));
  const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < M; ++i) {
    // Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
    DType *vals = reinterpret_cast<DType*>(work.dptr_);
    DType *sorted_vals = dat.dptr_+i*N;
    index_t *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const index_t& i1, const index_t& i2){ return vals[i1] < vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const index_t& i1, const index_t& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const index_t& i1, const index_t& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const index_t& i1, const index_t& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // Gather the values corresponding to the first K sorted indices.
    for (index_t j = 0; j < K; ++j) {
      sorted_vals[j] = vals[indices[j]];
    }
  }
}

#ifdef __CUDACC__

// Ordering predicate used by the small-K GPU path: true when (val1, ind1)
// should precede (val2, ind2).
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, index_t ind1, DType val2, index_t ind2,
                                 bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}

template<typename DType>
MSHADOW_XINLINE void MergeTopK(index_t K, DType *val1, index_t *ind1, DType *val2, index_t *ind2,
                               bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  index_t i1(K-1), i2(K-1);
  for (index_t i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (index_t i = K; i--;) {
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}

// GPU kernel for small K: each block selects the top-K of one batch item
// entirely in shared memory (per-thread insertion sort + tree merge).
template<typename DType>
__global__ void PartialSortSmallK(index_t K, index_t N, DType *val, index_t *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ index_t buff[];
  // Start of buffer sections associated with this thread.
  const index_t offset(threadIdx.x*K);
  index_t *ind_buff = &buff[offset];
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread.
  for (index_t i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const index_t first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (index_t i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    index_t cur_ind(ind[i]);
    for (index_t j = K; j-- && TopKCompare(cur_val, cur_ind,
                                           val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (index_t s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1;
       last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (index_t i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}

// GPU batched (partial) sort. Large K: stable SortByKey passes restore the
// batch order. Small K (<= 5): one-kernel shared-memory partial sort.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
                                   const Tensor<gpu, 1, index_t>& ind,
                                   const Tensor<gpu, 1, char>& work,
                                   index_t K, index_t N, bool is_ascend,
                                   Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const index_t M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    size_t alignment = std::max(sizeof(DType), sizeof(index_t));
    size_t id_size = PadBytes(sizeof(index_t) * ind.size(0), alignment);
    Tensor<gpu, 1, index_t> batch_id(reinterpret_cast<index_t*>(work.dptr_),
                                     Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(index_t)+sizeof(DType)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
                       (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}

#endif

/*!
 * \brief Implementation of the TopK operation
 *
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param src the Source blob
 * \param ret the destination blobs
 * \param param the topk parameters
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 */
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, index_t> indices, sel_indices;
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<index_t>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Temp space needed by the full sorts.
  size_t temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                                    + PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
  }
  // Carve the single workspace allocation into sorted_dat / indices /
  // (optionally) sel_indices / scratch, advancing workspace_curr_ptr.
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // CPU: flatten (transposing when the sort axis is not innermost) into
    // workspace memory; the sort reads from temp_workspace.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // GPU: copy (and possibly transpose) the source into sorted_dat, which
    // is then sorted in place.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Initialize `indices` with 0, 1, 2, ... per flattened element.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, index_t{0}, index_t{1},
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);

  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattened source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);

  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
        Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  } else {
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                   0, k),
          Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}

// Compute the total workspace (in bytes) TopKImplwithWorkspace needs for
// `src`/`param`; also reports the scratch ("temp") portion via temp_size_ptr.
// Must mirror the layout arithmetic in TopKImpl exactly.
template<typename xpu, typename DType>
size_t TopKWorkspaceSize(const TBlob& src,
                         const TopKParam& param,
                         size_t *temp_size_ptr) {
  using namespace mshadow;
  using namespace mshadow::expr;
  size_t batch_size = 0;
  size_t temp_size;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);

  // Temp space needed by the full sorts.
  temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  *temp_size_ptr = temp_size;
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                                    + PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
  }
  return workspace_size;
}

// Variant of TopKImpl that uses a caller-provided workspace pointer instead
// of requesting temp space from a Resource. The workspace must be at least
// TopKWorkspaceSize<xpu, DType>(src, param, &temp_size) bytes, and the same
// `temp_size` must be passed in here. Otherwise identical to TopKImpl.
template<typename xpu, typename DType, typename IDType>
void TopKImplwithWorkspace(const RunContext &ctx,
                           const std::vector<OpReqType>& req,
                           const TBlob& src,
                           const std::vector<TBlob>& ret,
                           const TopKParam& param,
                           char* workspace_curr_ptr,
                           const size_t &temp_size,
                           Stream<xpu>* s) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, index_t> indices, sel_indices;
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<index_t>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Carve the caller-provided workspace into sorted_dat / indices /
  // (optionally) sel_indices / scratch, advancing workspace_curr_ptr.
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // CPU: flatten (transposing when needed) so the sort reads from
    // temp_workspace.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // GPU: copy (and possibly transpose) the source into sorted_dat.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Initialize `indices` with 0, 1, 2, ... per flattened element.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1,
                                           index_t{0}, index_t{1}, kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);

  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattened source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);

  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
        Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  } else {
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                   0, k),
          Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}

// Operator entry point for topk: dispatches on the input dtype (and the
// requested index dtype when indices are returned) and calls TopKImpl.
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
                                     ctx.requested[0], req, inputs[0], outputs, param);
      })
    });
  } else {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKImpl<xpu, DType, index_t>(ctx.run_ctx,
                                    ctx.requested[0], req, inputs[0], outputs, param);
    });
  }
}

template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  // sort == topk with k = all elements, returning sorted values.
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;  // k <= 0 selects every element along the axis
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, index_t>(ctx.run_ctx,
                                  ctx.requested[0], req, inputs[0], outputs, topk_param);
  });
}

// argsort == topk with k = all elements, returning indices only.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
                                   ctx.requested[0], req, inputs[0], outputs, topk_param);
    });
  });
}

// Backward pass for topk (ret_typ "value" or "both"): scatters the incoming
// output gradient back to the positions recorded in the saved indices
// (inputs[2]); all other input-gradient entries are zero (kWriteTo) or left
// untouched (kAddTo).
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  // Workspace: batch_size*k scatter targets + one per-batch offset.
  Tensor<xpu, 1, index_t> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, index_t>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, index_t> sel_indices =
      Tensor<xpu, 1, index_t>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, index_t> batch_shift =
      Tensor<xpu, 1, index_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 2, DType> out_grad =
      inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
      outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[b] = b * element_num (start offset of each batch).
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, index_t{0}, element_num, kWriteTo,
                                           batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
        broadcast_to(inplace_reshape(batch_shift,
                                     Shape3(src_shape[0], src_shape[2], 1)),
                     mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
        Shape3(0, 2, 1)), Shape1(batch_size * k));
    sel_indices += tcast<index_t>(indices);
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, IDType> indices =
        inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<index_t>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       mxnet::TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0] || kAddTo == req[0]) {
    if (kWriteTo == req[0]) {
      in_grad = scalar<DType>(0);
    }
    mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
                                            sel_indices.dptr_,
                                            out_grad.dptr_,
                                            req[0],
                                            in_grad.dptr_);
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}

// Backward dispatcher: selects index dtype per ret_typ (mask has no gradient).
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
      });
    });
  } else if (param.ret_typ == topk_enum::kReturnValue) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKBackwardImpl<xpu, DType, index_t>(ctx, inputs, req, outputs, param);
    });
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}

// Number of output blobs the op produces (2 only when both value + indices).
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) {
    return static_cast<uint32_t>(1);
  } else {
    return static_cast<uint32_t>(2);
  }
}

// Number of outputs exposed to the user (the "value"-only path hides the
// internally produced indices output).
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    return static_cast<uint32_t>(2);
  } else {
    return static_cast<uint32_t>(1);
  }
}

// Type inference for topk.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  // out_attr[0] -> stores value
  // out_attr[1] -> stores indices
  if (out_size > 1) {
    if (param.ret_typ == topk_enum::kReturnValue) {
#if MXNET_USE_INT64_TENSOR_SIZE == 1
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
            << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
            << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
          << "Failed to set the type of ret_indices.";
  } else {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
    return out_attrs->at(0) != -1;
  }
  return true;
}

// Shape inference shared by topk/sort/argsort: derives the output shape(s)
// from the input shape via ParseTopKParam.
inline bool TopKShapeImpl(const TopKParam& param,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    CHECK_EQ(out_attrs->size(), 1U);
  } else {
    CHECK_EQ(out_attrs->size(), 2U);
  }
  mxnet::TShape& in_shape = (*in_attrs)[0];
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  } else {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}

inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return TopKShapeImpl(param, in_attrs, out_attrs);
}

// Type inference for sort: propagates the data dtype between input and
// output 0; output 1 (indices) is fixed to the platform index type.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK_EQ(out_size, 2);
#if MXNET_USE_INT64_TENSOR_SIZE == 1
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
        << "Failed to set the type of ret_indices";
  CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  if (data_type == -1) return false;
  return true;
}

inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}

inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
        << "Failed to set the type of ret_indices.";
  return true;
}

inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
H2GEMM.h
// // Created by Bangtian Liu on 9/1/19. // #ifndef PROJECT_H2GEMM_H #define PROJECT_H2GEMM_H #include <iostream> #include <math.h> #include <float.h> #include <assert.h> #include <string.h> #include <stdio.h> #include <stdint.h> #include <cholUtils.h> #ifndef HALIDE_ATTRIBUTE_ALIGN #ifdef _MSC_VER #define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x)) #else #define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x))) #endif #endif #ifndef BUFFER_T_DEFINED #define BUFFER_T_DEFINED #include <stdbool.h> #include <stdint.h> typedef struct buffer_t { uint64_t dev; uint8_t* host; int32_t extent[4]; int32_t stride[4]; int32_t min[4]; int32_t elem_size; HALIDE_ATTRIBUTE_ALIGN(1) bool host_dirty; HALIDE_ATTRIBUTE_ALIGN(1) bool dev_dirty; HALIDE_ATTRIBUTE_ALIGN(1) uint8_t _padding[10 - sizeof(void *)]; } buffer_t; #endif #define __user_context_ NULL struct halide_filter_metadata_t; extern "C" { void *sympiler_malloc(void *ctx, size_t s){return(malloc(s));} void sympiler_free(void *ctx, void *ptr){free(ptr);}; } #ifdef _WIN32 float roundf(float); double round(double); #else inline float asinh_f32(float x) {return asinhf(x);} inline float acosh_f32(float x) {return acoshf(x);} inline float atanh_f32(float x) {return atanhf(x);} inline double asinh_f64(double x) {return asinh(x);} inline double acosh_f64(double x) {return acosh(x);} inline double atanh_f64(double x) {return atanh(x);} #endif inline float sqrt_f32(float x) {return sqrtf(x);} inline float sin_f32(float x) {return sinf(x);} inline float asin_f32(float x) {return asinf(x);} inline float cos_f32(float x) {return cosf(x);} inline float acos_f32(float x) {return acosf(x);} inline float tan_f32(float x) {return tanf(x);} inline float atan_f32(float x) {return atanf(x);} inline float sinh_f32(float x) {return sinhf(x);} inline float cosh_f32(float x) {return coshf(x);} inline float tanh_f32(float x) {return tanhf(x);} inline float hypot_f32(float x, float y) {return hypotf(x, y);} inline float exp_f32(float x) {return 
expf(x);} inline float log_f32(float x) {return logf(x);} inline float pow_f32(float x, float y) {return powf(x, y);} inline float floor_f32(float x) {return floorf(x);} inline float ceil_f32(float x) {return ceilf(x);} inline float round_f32(float x) {return roundf(x);} inline double sqrt_f64(double x) {return sqrt(x);} inline double sin_f64(double x) {return sin(x);} inline double asin_f64(double x) {return asin(x);} inline double cos_f64(double x) {return cos(x);} inline double acos_f64(double x) {return acos(x);} inline double tan_f64(double x) {return tan(x);} inline double atan_f64(double x) {return atan(x);} inline double sinh_f64(double x) {return sinh(x);} inline double cosh_f64(double x) {return cosh(x);} inline double tanh_f64(double x) {return tanh(x);} inline double hypot_f64(double x, double y) {return hypot(x, y);} inline double exp_f64(double x) {return exp(x);} inline double log_f64(double x) {return log(x);} inline double pow_f64(double x, double y) {return pow(x, y);} inline double floor_f64(double x) {return floor(x);} inline double ceil_f64(double x) {return ceil(x);} inline double round_f64(double x) {return round(x);} inline float nan_f32() {return NAN;} inline float neg_inf_f32() {return -INFINITY;} inline float inf_f32() {return INFINITY;} inline bool is_nan_f32(float x) {return x != x;} inline bool is_nan_f64(double x) {return x != x;} //inline float float_from_bits(uint32_t bits) { // union { // uint32_t as_uint; // float as_float; // } u; // u.as_uint = bits; // return u.as_float; //} inline int64_t make_int64(int32_t hi, int32_t lo) { return (((int64_t)hi) << 32) | (uint32_t)lo; } inline double make_float64(int32_t i0, int32_t i1) { union { int32_t as_int32[2]; double as_double; } u; u.as_int32[0] = i0; u.as_int32[1] = i1; return u.as_double; } template<typename A, typename B> A reinterpret(B b) {A a; memcpy(&a, &b, sizeof(a)); return a;} double one [2]={1.0,0.}, zero [2]={0.,0.}; int sw = false, lb1 = 0, ub1 = 0; double *cur; int 
info=0; #ifdef __cplusplus extern "C" { #endif int32_t seqGEMM(double *D, double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, int32_t *slen, int32_t *nidx, int32_t *nidy, int32_t ncount, int32_t *fidx, int32_t *fidy, int32_t fcount, double *utmp, uint64_t *utmpoffset, double *ftmp, uint64_t *ftmpoffset, int depth) { for (int i = 0; i < ncount; i++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[nidx[i]], nrhs, Ddim[nidy[i]], float_from_bits(1065353216 /* 1 */), &D[Dptr[i]], Ddim[nidx[i]], &mrhs[wptr[nidy[i]]], Ddim[nidy[i]], float_from_bits(1065353216/* 0 */), &apres[uptr[nidx[i]]], Ddim[nidx[i]]); } // for i // for (int i = 0; i < ncount; i++) { // for (int j = 0; j < nrhs; j++) { // for (int k = 0; k < Ddim[nidx[i]]; k++) { // int32_t _0 = j * Ddim[nidx[i]]; // int32_t _1 = _0 + k; // int32_t _2 = uptr[nidx[i]] + _1; // int32_t _3 = utmpoffset[i] + _1; // double _4 = apres[_2] + utmp[_3]; // apres[_2] = _4; // } // for k // } // for j // } // for i for (int i = depth - 1; i > -1; i--) { int32_t _0 = i + 1; //#pragma omp parallel for for (int j = levelset[i]; j < levelset[_0]; j++) { // int32_t _1 = k + 1; // for (int j = wpart[k]; j < wpart[_1]; j++) // { // printf("idx=%d\n",idx[j]); int32_t _2 = (int32_t) (4294967295); bool _3 = lchildren[idx[j]] == _2; if (_3) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, Ddim[lm[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 
*/), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else // } // for j } // for k } // for i for (int i = 0; i < fcount; i++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[fidx[i]], nrhs, slen[fidy[i]], float_from_bits(1065353216 /* 1 */), &B[Bptr[i]], slen[fidx[i]], &wskel[wskeloffset[fidy[i]]], slen[fidy[i]], float_from_bits(1065353216 /* 0 */), &uskel[uskeloffset[fidx[i]]], slen[fidx[i]]); } // for i // for (int i = 0; i < fcount; i++) { // for (int j = 0; j < nrhs; j++) { // for (int k = 0; k < slen[fidx[i]]; k++) { // int32_t _12 = j * slen[fidx[i]]; // int32_t _13 = _12 + k; // int32_t _14 = uskeloffset[fidx[i]] + _13; // int32_t _15 = ftmpoffset[i] + _13; // double _16 = uskel[_14] + ftmp[_15]; // uskel[_14] = _16; // } // for k // } // for j // } // for i for (int i = 0; i < depth; i++) { int32_t _17 = i + 1; //#pragma omp parallel for for (int j = levelset[i]; j < levelset[_17]; j++) { // int32_t _18 = wpart[k] - 1; // int32_t _19 = k + 1; // int32_t _20 = wpart[_19] - 1; // for (int j = _20; j > _18; j--) // { int32_t _21 = (int32_t) (4294967295); bool _22 = lchildren[idx[j]] == _21; if (_22) { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, Ddim[lm[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); } // if _22 else { cblas_dgemm(CblasColMajor, 
CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else // } // for j } // for k } // for i return 0; } int32_t blockGEMM(double *D, double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, int32_t *slen, int32_t *nblockSet, int32_t *nblocks, int32_t *npairx, int32_t *npairy, int32_t *fblockSet, int32_t *fblocks, int32_t *fpairx, int32_t *fpairy, int32_t ncount, int32_t fcount, int32_t depth) { mkl_set_dynamic(false); #pragma omp parallel for for (int i = 0; i < ncount; i++) { int32_t _0 = i + 1; for (int j = nblockSet[i]; j < nblockSet[_0]; j++) { int32_t _1 = j + 1; for (int k = nblocks[j]; k < nblocks[_1]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[npairx[k]], nrhs, Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &D[Dptr[k]], Ddim[npairx[k]], &mrhs[wptr[npairy[k]]], Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &apres[uptr[npairx[k]]], Ddim[npairx[k]]); } // for k } // for j } for (int i = depth - 1; i > -1; i--) { int32_t _0 = i + 1; //#pragma omp parallel for for (int j = levelset[i]; j < levelset[_0]; j++) { // int32_t _1 = k + 1; // 
for (int j = wpart[k]; j < wpart[_1]; j++) // { // printf("idx=%d\n",idx[j]); int32_t _2 = (int32_t) (4294967295); bool _3 = lchildren[idx[j]] == _2; if (_3) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, Ddim[lm[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]], nrhs, slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else // } // for j } // for k } // for i #pragma omp parallel for for (int i = 0; i < fcount; i++) { int32_t _8 = i + 1; for (int j = fblockSet[i]; j < fblockSet[_8]; j++) { int32_t _9 = j + 1; for (int k = fblocks[j]; k < fblocks[_9]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[fpairx[k]], nrhs, slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &B[Bptr[k]], slen[fpairx[k]], &wskel[wskeloffset[fpairy[k]]], slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[fpairx[k]]], slen[fpairx[k]]); } // for k } // for j } // for i for (int i = 0; i < depth; i++) { int32_t _17 = i + 1; //#pragma omp parallel for for (int j = levelset[i]; j < levelset[_17]; j++) { // int32_t _18 = wpart[k] - 1; // int32_t _19 = k + 1; // int32_t _20 = wpart[_19] - 1; // for (int j = _20; j > _18; j--) 
// { int32_t _21 = (int32_t) (4294967295); bool _22 = lchildren[idx[j]] == _21; if (_22) { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, Ddim[lm[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); } // if _22 else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]], nrhs, slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else // } // for j } // for k } // for i return 0; } int32_t CBHGEMM( double *D, double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, int32_t *slen, int32_t *nblockSet, int32_t *nblocks, int32_t *npairx, int32_t *npairy, int32_t *fblockSet, int32_t *fblocks, int32_t *fpairx, int32_t *fpairy, int32_t ncount, int32_t fcount, int32_t *wpart, int32_t *clevelset, int cdepth) { mkl_set_dynamic(false); #pragma omp parallel for for (int i = 0; i < ncount; i++) { int32_t _0 = i + 1; for (int j = nblockSet[i]; j < nblockSet[_0]; j++) { int32_t _1 = j + 1; for (int k = nblocks[j]; k < nblocks[_1]; k++) { 
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[npairx[k]], nrhs, Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &D[Dptr[k]], Ddim[npairx[k]], &mrhs[wptr[npairy[k]]], Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &apres[uptr[npairx[k]]], Ddim[npairx[k]]); } // for k } // for j } for (int i = 0; i < cdepth; i++) { int32_t _0 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { int32_t _2 = (int32_t)(4294967295); bool _3 = lchildren[idx[j]] == _2; if (_3) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,Ddim[lm[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k } // for i #pragma omp parallel for for (int i = 0; i < fcount; i++) { int32_t _8 = i + 1; for (int j = fblockSet[i]; j < fblockSet[_8]; j++) { int32_t _9 = j + 1; for (int k = fblocks[j]; k < fblocks[_9]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[fpairx[k]], nrhs, slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &B[Bptr[k]], slen[fpairx[k]], 
&wskel[wskeloffset[fpairy[k]]], slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[fpairx[k]]], slen[fpairx[k]]); } // for k } // for j } // for i for (int i = cdepth-1; i > -1; i--) { int32_t _17 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { int32_t _21 = (int32_t)(4294967295); bool _22 = lchildren[idx[j]] == _21; if (_22) { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, Ddim[lm[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); } // if _22 else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i return 0; } int32_t lowH2GEMM( double *D, double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, int32_t *slen, int32_t *nblockSet, int32_t *nblocks, int32_t *npairx, 
int32_t *npairy, int32_t *fblockSet, int32_t *fblocks, int32_t *fpairx, int32_t *fpairy, int32_t ncount, int32_t fcount, int32_t *wpart, int32_t *clevelset, int cdepth ){ int nstop = ncount/12*12; #pragma omp parallel for for (int i = 0; i < nstop; i++) { int32_t _0 = i + 1; for (int j = nblockSet[i]; j < nblockSet[_0]; j++) { int32_t _1 = j + 1; for (int k = nblocks[j]; k < nblocks[_1]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[npairx[k]], nrhs, Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &D[Dptr[k]], Ddim[npairx[k]], &mrhs[wptr[npairy[k]]], Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &apres[uptr[npairx[k]]], Ddim[npairx[k]]); } // for k } // for j } mkl_set_dynamic(true); mkl_set_num_threads(12); if(nstop<ncount){ for (int i = nstop; i < ncount; i++) { int32_t _0 = i + 1; for (int j = nblockSet[i]; j < nblockSet[_0]; j++) { int32_t _1 = j + 1; for (int k = nblocks[j]; k < nblocks[_1]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[npairx[k]], nrhs, Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &D[Dptr[k]], Ddim[npairx[k]], &mrhs[wptr[npairy[k]]], Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */), &apres[uptr[npairx[k]]], Ddim[npairx[k]]); } // for k } // for j } } #pragma omp parallel for for(int k = clevelset[0]; k<clevelset[1]; k++) { for(int j = wpart[k]; j<wpart[k+1]; j++) { int32_t _2 = (int32_t)(4294967295); bool _3 = lchildren[idx[j]] == _2; if (_3) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,Ddim[lm[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], 
float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } } //#pragma omp parallel for // for(int k = clevelset[0]; k<clevelset[1]; k++) // { // for(int j = wpart[k]; j<wpart[k+1]; j++) // { // int32_t _2 = (int32_t)(4294967295); // bool _3 = lchildren[idx[j]] == _2; // if (_3) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,Ddim[lm[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 // else // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,slen[lchildren[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; // int32_t _5 = _4 + VTptr[idx[j]]; // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,slen[rchildren[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[_5], // slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 else // } // } for (int i = 1; i < cdepth-2; i++) { int32_t _0 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { { 
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k } // for i mkl_set_num_threads(12); for (int i = cdepth-2; i < cdepth; i++) { int32_t _0 = i + 1; //#pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k } // for i int fstop = fcount/12*12; #pragma omp parallel for for (int i = 0; i < fstop; i++) { int32_t _8 = i + 1; for (int j = fblockSet[i]; j < fblockSet[_8]; j++) { int32_t _9 = j + 1; for (int k = fblocks[j]; k < fblocks[_9]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, 
CblasNoTrans, slen[fpairx[k]], nrhs, slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &B[Bptr[k]], slen[fpairx[k]], &wskel[wskeloffset[fpairy[k]]], slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[fpairx[k]]], slen[fpairx[k]]); } // for k } // for j } // for i mkl_set_num_threads(12); if(fstop<fcount){ for (int i = fstop; i < fcount; ++i) { int32_t _8 = i + 1; for (int j = fblockSet[i]; j < fblockSet[_8]; j++) { int32_t _9 = j + 1; for (int k = fblocks[j]; k < fblocks[_9]; k++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[fpairx[k]], nrhs, slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &B[Bptr[k]], slen[fpairx[k]], &wskel[wskeloffset[fpairy[k]]], slen[fpairy[k]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[fpairx[k]]], slen[fpairx[k]]); } // for k } // for j } } for (int i = cdepth-1; i > cdepth-3; i--) { int32_t _17 = i + 1; //#pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i for (int i = cdepth-3; i > 0; i--) { int32_t _17 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t 
_18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i for (int i = 0; i > -1; i--) { int32_t _17 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { int32_t _21 = (int32_t)(4294967295); bool _22 = lchildren[idx[j]] == _21; if (_22) { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, Ddim[lm[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); } // if _22 else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], 
float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i return 0; } //int32_t H2GEMM(double *D, // double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, // double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, // int32_t *slen, int32_t *nidx, int32_t *nidy, int32_t ncount, int32_t *fidx, int32_t *fidy, int32_t fcount, double *utmp, uint64_t *utmpoffset, double *ftmp, // uint64_t *ftmpoffset, int cdepth) { //#pragma omp parallel for // for (int i = 0; i < ncount; i++) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // Ddim[nidx[i]],nrhs,Ddim[nidy[i]], // float_from_bits(1065353216 /* 1 */), &D[Dptr[i]], // Ddim[nidx[i]], &mrhs[wptr[nidy[i]]], Ddim[nidy[i]], float_from_bits(0 /* 0 */), // &utmp[utmpoffset[i]], Ddim[nidx[i]]); // } // for i // for (int i = 0; i < ncount; i++) // { //#pragma omp parallel for // for (int j = 0; j < nrhs; j++) // { // for (int k = 0; k < Ddim[nidx[i]]; k++) // { // int32_t _0 = j * Ddim[nidx[i]]; // int32_t _1 = _0 + k; // int32_t _2 = uptr[nidx[i]] + _1; // int32_t _3 = utmpoffset[i] + _1; // double _4 = apres[_2] + utmp[_3]; // apres[_2] = _4; // } // for k // } // for j // } // for i //// int32_t _5 = (int32_t)(4294967295); //// int32_t _6 = (int32_t)(6); // for (int i = 0; i < cdepth; i++) // { // int32_t _7 = i + 1; //#pragma omp parallel for // for (int k = clevelset[i]; k < clevelset[_7]; k++) // { // int32_t _1 = k + 1; // for (int j = wpart[k]; j < wpart[_1]; j++) // { // int32_t _2 = (int32_t)(4294967295); // bool _3 = lchildren[idx[j]] == _2; // if (_3) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, 
CblasNoTrans, // slen[idx[j]],nrhs,Ddim[lm[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 // else // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,slen[lchildren[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; // int32_t _5 = _4 + VTptr[idx[j]]; // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,slen[rchildren[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[_5], // slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 else // } // } // } // for i //#pragma omp parallel for // for (int i = 0; i < fcount; i++) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[fidx[i]],nrhs,slen[fidy[i]], // float_from_bits(1065353216 /* 1 */), &B[Bptr[i]], // slen[fidx[i]], &wskel[wskeloffset[fidy[i]]], slen[fidy[i]], float_from_bits(0 /* 0 */), // &ftmp[ftmpoffset[i]], slen[fidx[i]]); // } // for i // for (int i = 0; i < fcount; i++) // { //#pragma omp parallel for // for (int j = 0; j < nrhs; j++) // { // for (int k = 0; k < slen[fidx[i]]; k++) // { // int32_t _12 = j * slen[fidx[i]]; // int32_t _13 = _12 + k; // int32_t _14 = uskeloffset[fidx[i]] + _13; // int32_t _15 = ftmpoffset[i] + _13; // double _16 = uskel[_14] + ftmp[_15]; // uskel[_14] = _16; // } // for k // } // for j // } // for i // // for (int i = cdepth-1; i > -1; i--) // { // int32_t _17 = i + 1; //#pragma omp parallel for // for (int k = clevelset[i]; k < clevelset[_17]; k++) // { // int32_t _18 = wpart[k] 
- 1; // int32_t _19 = k + 1; // int32_t _20 = wpart[_19] - 1; // for (int j = _20; j > _18; j--) // { // int32_t _21 = (int32_t)(4294967295); // bool _22 = lchildren[idx[j]] == _21; // if (_22) // { // cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, // Ddim[lm[idx[j]]],nrhs,slen[idx[j]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), // &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); // } // if _22 // else // { // cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, // slen[lchildren[idx[j]]],nrhs,slen[idx[j]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), // &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); // int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; // int32_t _24 = _23 + VTptr[idx[j]]; // cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, // slen[rchildren[idx[j]]],nrhs,slen[idx[j]], // float_from_bits(1065353216 /* 1 */), &VT[_24], // slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), // &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); // } // if _22 else // } // for j // } // for k // } // for i // // return 0; //} #ifdef __cplusplus } // extern "C" #endif #endif //PROJECT_H2GEMM_H
GB_binop__iseq_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_int64) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_int64) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_int64) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int64) // A*D function (colscale): GB (_AxD__iseq_int64) // D*A function (rowscale): GB (_DxB__iseq_int64) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_int64) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int64) // C=scalar+B GB (_bind1st__iseq_int64) // C=scalar+B' GB (_bind1st_tran__iseq_int64) // C=A+scalar GB (_bind2nd__iseq_int64) // C=A'+scalar GB (_bind2nd_tran__iseq_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
*restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_int64) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
concurrent-computing.c
#include <stdio.h>
#include <omp.h>

/*
 * Print three lines of the form "<j> Enjoy", "<j> Rosetta", "<j> Code"
 * and return the task id unchanged.
 *
 * j: task identifier, echoed in every output line and returned.
 */
int executeTask(int j){
    static const char *const str[] = { "Enjoy", "Rosetta", "Code" };
    for (int i = 0; i < 3; i++) {
        printf("%d %s \n", j, str[i]);
    }
    return j;
}

int main() {
    /*
     * BUG FIX: the original stored each iteration's result into a single
     * shared `volatile int r`. With `#pragma omp parallel for` all threads
     * wrote that one object without synchronization — a data race, which is
     * undefined behavior in C11/OpenMP (`volatile` provides no atomicity or
     * ordering). The value was never read afterwards, so the fix is simply
     * to keep the result in a per-iteration (thread-private) local.
     */
    #pragma omp parallel for num_threads(3)
    for (int j = 0; j < 1000000; ++j) {
        int r = executeTask(j);   /* private to this iteration's thread */
        (void)r;                  /* result intentionally unused */
    }
    return 0;
}
libperf_int.h
/**
 * Copyright (C) Mellanox Technologies Ltd. 2001-2015. ALL RIGHTS RESERVED.
 * Copyright (C) The University of Tennessee and The University
 * of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED.
 *
 * See file LICENSE for terms.
 */

#ifndef LIBPERF_INT_H_
#define LIBPERF_INT_H_

#include <tools/perf/api/libperf.h>

BEGIN_C_DECLS

/** @file libperf_int.h */

#include <ucs/async/async.h>
#include <ucs/time/time.h>
#include <ucs/sys/math.h>
#if _OPENMP
#include <omp.h>
#endif


/* Capacity of the per-context timing sample buffer (used as a ring buffer:
 * see the head-wraparound in ucx_perf_update()). */
#define TIMING_QUEUE_SIZE    2048
/* Active-message ID used by the UCT side of the perf test. */
#define UCT_PERF_TEST_AM_ID  5
#define ADDR_BUF_SIZE        2048


/* Run the measurement loop body until the test is done (iteration budget
 * reached or end time passed — see ucx_perf_context_done()). */
#define UCX_PERF_TEST_FOREACH(perf) \
    while (!ucx_perf_context_done(perf))

/* Invoke callback `_func` of this test's RTE with its group as the first
 * argument, forwarding any extra arguments. */
#define rte_call(_perf, _func, ...) \
    ((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__))


typedef struct ucx_perf_context          ucx_perf_context_t;
typedef struct uct_peer                  uct_peer_t;
typedef struct ucp_perf_request          ucp_perf_request_t;
typedef struct ucx_perf_thread_context   ucx_perf_thread_context_t;


/* Pluggable memory backend for test buffers; one implementation per
 * supported memory type (see mem_type). */
struct ucx_perf_allocator {
    ucs_memory_type_t mem_type;
    ucs_status_t (*init)(ucx_perf_context_t *perf);
    ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length,
                              unsigned flags, uct_allocated_memory_t *alloc_mem);
    void         (*uct_free)(const ucx_perf_context_t *perf,
                             uct_allocated_memory_t *alloc_mem);
    void         (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type,
                           const void *src, ucs_memory_type_t src_mem_type,
                           size_t count);
    void*        (*memset)(void *dst, int value, size_t count);
};


/* Full state of one running perf test: parameters, buffers, measurement
 * counters, and the transport-specific (UCT or UCP) handles. */
struct ucx_perf_context {
    ucx_perf_params_t            params;

    /* Buffers */
    void                         *send_buffer;
    void                         *recv_buffer;

    /* Measurements */
    double                       start_time_acc; /* accurate start time */
    ucs_time_t                   end_time;       /* inaccurate end time (upper bound) */
    ucs_time_t                   prev_time;      /* time of previous iteration */
    ucs_time_t                   report_interval; /* interval of showing report */
    ucx_perf_counter_t           max_iter;

    /* Measurements of current/previous **report** */
    struct {
        ucx_perf_counter_t       msgs;     /* number of messages */
        ucx_perf_counter_t       bytes;    /* number of bytes */
        ucx_perf_counter_t       iters;    /* number of iterations */
        ucs_time_t               time;     /* inaccurate time (for median and report interval) */
        double                   time_acc; /* accurate time (for avg latency/bw/msgrate) */
    } current, prev;

    ucs_time_t                   timing_queue[TIMING_QUEUE_SIZE];
    unsigned                     timing_queue_head;
    const ucx_perf_allocator_t   *allocator;

    /* Transport-specific state — exactly one arm is in use per test. */
    union {
        struct {
            ucs_async_context_t    async;
            uct_component_h        cmpt;
            uct_md_h               md;
            uct_worker_h           worker;
            uct_iface_h            iface;
            uct_peer_t             *peers;
            uct_allocated_memory_t send_mem;
            uct_allocated_memory_t recv_mem;
            uct_iov_t              *iov;
        } uct;

        struct {
            ucp_context_h             context;
            ucx_perf_thread_context_t *tctx;
            ucp_worker_h              worker;
            ucp_ep_h                  ep;
            ucp_rkey_h                rkey;
            unsigned long             remote_addr;
            ucp_mem_h                 send_memh;
            ucp_mem_h                 recv_memh;
            ucp_dt_iov_t              *send_iov;
            ucp_dt_iov_t              *recv_iov;
            void                      *am_hdr;
        } ucp;
    };
};


/* Per-thread slice of a multi-threaded test: its own context and result. */
struct ucx_perf_thread_context {
    pthread_t           pt;
    int                 tid;
    ucs_status_t        status;
    ucx_perf_context_t  perf;
    ucx_perf_result_t   result;
};


/* Remote peer as seen by the UCT test: endpoint plus remote buffer info. */
struct uct_peer {
    uct_ep_h           ep;
    unsigned long      remote_addr;
    uct_rkey_bundle_t  rkey;
};


struct ucp_perf_request {
    void               *context;
};


/* Dispatch table implemented once for UCT and once for UCP
 * (see ucx_perf_funcs[] below). */
typedef struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs_t;


extern ucx_perf_funcs_t ucx_perf_funcs[];

/* Helpers implemented in the corresponding .c files. */
unsigned rte_peer_index(unsigned group_size, unsigned group_index);

void ucx_perf_test_start_clock(ucx_perf_context_t *perf);

void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index);

void uct_perf_iface_flush_b(ucx_perf_context_t *perf);

ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf);

ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf);

void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result);

void uct_perf_barrier(ucx_perf_context_t *perf);

void ucp_perf_thread_barrier(ucx_perf_context_t *perf);

void ucp_perf_barrier(ucx_perf_context_t *perf);

ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf);
void ucp_perf_test_free_mem(ucx_perf_context_t *perf);

ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf);

void uct_perf_test_free_mem(ucx_perf_context_t *perf);

ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                   ucx_perf_result_t* result);

void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                   const ucx_perf_params_t *params);

void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                         const ucx_perf_params_t* params);

/**
 * Get the total length of the message size given by parameters
 */
size_t ucx_perf_get_message_size(const ucx_perf_params_t *params);


/* Returns nonzero when the test has finished: either the iteration budget
 * is exhausted or the (inaccurate, upper-bound) end time has passed. */
static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf)
{
    return ucs_unlikely((perf->current.iters >= perf->max_iter) ||
                        (perf->current.time > perf->end_time));
}

/* Record an accurate timestamp into the current measurement snapshot. */
static inline void ucx_perf_get_time(ucx_perf_context_t *perf)
{
    perf->current.time_acc = ucs_get_accurate_time();
}

/* Barrier across the OpenMP threads of a multi-threaded test; a no-op when
 * built without OpenMP or when running single-threaded. */
static inline void ucx_perf_omp_barrier(ucx_perf_context_t *perf)
{
#if _OPENMP
    if (perf->params.thread_count > 1) {
#pragma omp barrier
    }
#endif
}

/* Account one completed message of `bytes` bytes worth `iters` iterations:
 * updates the running counters, pushes the inter-message latency sample into
 * the timing ring buffer, and, once report_interval has elapsed since the
 * previous report, computes and emits a progress report via the RTE. */
static inline void ucx_perf_update(ucx_perf_context_t *perf,
                                   ucx_perf_counter_t iters, size_t bytes)
{
    ucx_perf_result_t result;

    perf->current.time   = ucs_get_time();
    perf->current.iters += iters;
    perf->current.bytes += bytes;
    perf->current.msgs  += 1;

    /* Store the elapsed time since the previous message; the head index
     * wraps around, so the queue keeps the most recent samples only. */
    perf->timing_queue[perf->timing_queue_head] = perf->current.time -
                                                  perf->prev_time;
    ++perf->timing_queue_head;
    if (perf->timing_queue_head == TIMING_QUEUE_SIZE) {
        perf->timing_queue_head = 0;
    }
    perf->prev_time = perf->current.time;

    if (perf->current.time - perf->prev.time >= perf->report_interval) {
        ucx_perf_get_time(perf);
        ucx_perf_calc_result(perf, &result);
        rte_call(perf, report, &result, perf->params.report_arg, 0, 0);
        /* The just-reported snapshot becomes the baseline for the next one. */
        perf->prev = perf->current;
    }
}

END_C_DECLS

#endif
mttkrp.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include "hicoo.h" int sptMTTKRPHiCOO_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode); int sptMTTKRPHiCOO_3D_Blocked( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode); int sptMTTKRPHiCOO_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode); int sptMTTKRPHiCOO_4D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode); int sptMTTKRPHiCOO_3D_MatrixTiling_init( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. 
sptIndex const mode); /** * Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] scratch an temporary array to store intermediate results, space assigned before this function * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". */ int sptMTTKRPHiCOO( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptMTTKRPHiCOO_3D_Blocked(hitsr, mats, mats_order, mode) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; sptValueVector scratch; // Temporary array /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptNewValueVector(&scratch, R, R); sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = ele_coord[mode]; for(sptIndex r=0; r<R; ++r) { mvals[mode_i * stride + 
r] += scratch.data[r]; } } // End loop entries } // End loop blocks } // End loop kernels free(block_coord); free(ele_coord); sptFreeValueVector(&scratch); return 0; } /* Very slow version! Slower than COO in Morton order. */ int sptMTTKRPHiCOO_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* block_coord is reused, no need to store ele_coord for 3D tensors */ sptBlockIndex * block_coord = (sptBlockIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex mode_i; sptIndex tmp_i_1, tmp_i_2; sptValue entry; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; 
sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { mode_i = (block_coord[mode] << hitsr->sb_bits) + hitsr->einds[mode].data[z]; tmp_i_1 = (block_coord[times_mat_index_1] << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z]; tmp_i_2 = (block_coord[times_mat_index_2] << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z]; entry = vals[z]; for(sptIndex r=0; r<R; ++r) { mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels free(block_coord); return 0; } int sptMTTKRPHiCOO_3D_Blocked( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptElementIndex mode_i; sptElementIndex tmp_i_1, tmp_i_2; sptValue entry; sptValue * restrict blocked_mvals; sptValue * restrict blocked_times_mat_1; sptValue * restrict blocked_times_mat_2; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { mode_i = hitsr->einds[mode].data[z]; tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride; sptValue 
* const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride; for(sptIndex r=0; r<R; ++r) { bmvals_row[r] += entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptMTTKRPHiCOO_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptMTTKRPHiCOO_3D_MatrixTiling(hitsr, mats, mats_order, mode) == 0); return 0; } // else if(nmodes == 4) { // sptAssert(sptMTTKRPHiCOO_4D_MatrixTiling(hitsr, mats, mats_order, mode) == 0); // return 0; // } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; sptValueVector scratch; // Temporary array /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptNewValueVector(&scratch, R, R); sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + 
(hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks } // End loop kernels free(blocked_times_mat); sptFreeValueVector(&scratch); return 0; } int sptMTTKRPHiCOO_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptElementIndex mode_i; sptElementIndex tmp_i_1, tmp_i_2; sptValue entry; sptValue * restrict blocked_mvals; sptValue * restrict blocked_times_mat_1; sptValue * restrict blocked_times_mat_2; /* Loop kernels */ // sptTimer loop_timer, kernel_timer, block_timer, element_timer, elementmat_timer, blockmat_timer; // double loop_etime = 0, kernel_etime = 0, block_etime = 0, element_etime = 0, elementmat_etime = 0, blockmat_etime = 0; // sptNewTimer(&loop_timer, 0); // sptNewTimer(&kernel_timer, 0); // sptNewTimer(&block_timer, 0); // sptNewTimer(&element_timer, 0); // sptNewTimer(&elementmat_timer, 0); // sptNewTimer(&blockmat_timer, 0); // sptStartTimer(loop_timer); for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ // printf("kptr_begin: %"PARTI_PRI_NNZ_INDEX", kptr_end: %"PARTI_PRI_NNZ_INDEX"\n", kptr_begin, kptr_end); // sptStartTimer(kernel_timer); for(sptIndex b=kptr_begin; b<kptr_end; ++b) { // sptStartTimer(blockmat_timer); blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_1 = times_mat_1->values + 
(hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; // sptStopTimer(blockmat_timer); // blockmat_etime += sptElapsedTime(blockmat_timer); // sptPrintElapsedTime(blockmat_timer, "===Blockmat Timer"); /* Loop entries in a block */ // printf("bptr_begin: %"PARTI_PRI_INDEX", bptr_end: %"PARTI_PRI_INDEX"\n", bptr_begin, bptr_end); // sptStartTimer(block_timer); for(sptIndex z=bptr_begin; z<bptr_end; ++z) { // sptStartTimer(elementmat_timer); mode_i = hitsr->einds[mode].data[z]; tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; // mode_i = (sptBlockMatrixIndex)hitsr->einds[mode].data[z]; // tmp_i_1 = (sptBlockMatrixIndex)hitsr->einds[times_mat_index_1].data[z]; // tmp_i_2 = (sptBlockMatrixIndex)hitsr->einds[times_mat_index_2].data[z]; entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride; sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride; // sptStopTimer(elementmat_timer); // elementmat_etime += sptElapsedTime(elementmat_timer); // sptPrintElapsedTime(elementmat_timer, "===Elementmat Timer"); // sptStartTimer(element_timer); #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { // blocked_mvals[mode_i * stride + r] += entry * // blocked_times_mat_1[tmp_i_1 * stride + r] * // blocked_times_mat_2[tmp_i_2 * stride + r]; bmvals_row[r] += entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r]; } // sptStopTimer(element_timer); // element_etime += sptElapsedTime(element_timer); // sptPrintElapsedTime(element_timer, "===Element Timer"); } // End loop entries // sptStopTimer(block_timer); // block_etime += sptElapsedTime(block_timer); 
// sptPrintElapsedTime(block_timer, "==Block Timer"); } // End loop blocks // sptStopTimer(kernel_timer); // kernel_etime += sptElapsedTime(kernel_timer); // sptPrintElapsedTime(kernel_timer, "=Kernel Timer"); } // End loop kernels // sptStopTimer(loop_timer); // loop_etime += sptElapsedTime(loop_timer); // sptPrintElapsedTime(loop_timer, "=Loop Timer"); // printf("\nTotal Elementmat Time: %lf\n", elementmat_etime); // printf("Total Element Time: %lf\n", element_etime); // printf("Total Blockmat Time: %lf\n", blockmat_etime); // printf("Total Block Time: %lf\n", block_etime); // printf("Total Kernel Time: %lf\n", kernel_etime); // printf("Total Loop Time: %lf\n\n", loop_etime); // sptFreeTimer(loop_timer); // sptFreeTimer(kernel_timer); // sptFreeTimer(block_timer); // sptFreeTimer(element_timer); // sptFreeTimer(elementmat_timer); // sptFreeTimer(blockmat_timer); return 0; } int sptMTTKRPHiCOO_4D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes == 4); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex times_mat_index_3 = mats_order[3]; sptRankMatrix * restrict times_mat_3 = mats[times_mat_index_3]; sptElementIndex mode_i; sptElementIndex tmp_i_1, tmp_i_2, tmp_i_3; sptValue entry; sptValue * restrict blocked_mvals; sptValue * restrict blocked_times_mat_1; sptValue * restrict blocked_times_mat_2; sptValue * restrict blocked_times_mat_3; /* Loop kernels */ // sptTimer loop_timer, kernel_timer, block_timer, element_timer, elementmat_timer, blockmat_timer; // double loop_etime = 0, kernel_etime = 0, block_etime = 0, element_etime = 0, elementmat_etime = 0, blockmat_etime = 0; // sptNewTimer(&loop_timer, 0); // sptNewTimer(&kernel_timer, 0); // sptNewTimer(&block_timer, 0); // sptNewTimer(&element_timer, 0); // sptNewTimer(&elementmat_timer, 0); // sptNewTimer(&blockmat_timer, 0); // sptStartTimer(loop_timer); for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ // printf("kptr_begin: %"PARTI_PRI_NNZ_INDEX", kptr_end: %"PARTI_PRI_NNZ_INDEX"\n", kptr_begin, kptr_end); // sptStartTimer(kernel_timer); for(sptIndex b=kptr_begin; b<kptr_end; ++b) { // 
sptStartTimer(blockmat_timer); blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_3 = times_mat_3->values + (hitsr->binds[times_mat_index_3].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; // sptStopTimer(blockmat_timer); // blockmat_etime += sptElapsedTime(blockmat_timer); // sptPrintElapsedTime(blockmat_timer, "===Blockmat Timer"); /* Loop entries in a block */ // printf("bptr_begin: %"PARTI_PRI_INDEX", bptr_end: %"PARTI_PRI_INDEX"\n", bptr_begin, bptr_end); // sptStartTimer(block_timer); for(sptIndex z=bptr_begin; z<bptr_end; ++z) { // sptStartTimer(elementmat_timer); mode_i = hitsr->einds[mode].data[z]; tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; tmp_i_3 = hitsr->einds[times_mat_index_3].data[z]; // mode_i = (sptBlockMatrixIndex)hitsr->einds[mode].data[z]; // tmp_i_1 = (sptBlockMatrixIndex)hitsr->einds[times_mat_index_1].data[z]; // tmp_i_2 = (sptBlockMatrixIndex)hitsr->einds[times_mat_index_2].data[z]; // tmp_i_3 = (sptBlockMatrixIndex)hitsr->einds[times_mat_index_3].data[z]; entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride; sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride; sptValue * const restrict blocked_times_mat_3_row = blocked_times_mat_3 + tmp_i_3 * stride; // sptStopTimer(elementmat_timer); // elementmat_etime += sptElapsedTime(elementmat_timer); // sptPrintElapsedTime(elementmat_timer, "===Elementmat Timer"); // sptStartTimer(element_timer); #pragma omp simd 
for(sptElementIndex r=0; r<R; ++r) { // blocked_mvals[mode_i * stride + r] += entry * // blocked_times_mat_1[tmp_i_1 * stride + r] * // blocked_times_mat_2[tmp_i_2 * stride + r] * // blocked_times_mat_3[tmp_i_3 * stride + r]; bmvals_row[r] += entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r] * blocked_times_mat_3_row[r]; } // sptStopTimer(element_timer); // element_etime += sptElapsedTime(element_timer); // sptPrintElapsedTime(element_timer, "===Element Timer"); } // End loop entries // sptStopTimer(block_timer); // block_etime += sptElapsedTime(block_timer); // sptPrintElapsedTime(block_timer, "==Block Timer"); } // End loop blocks // sptStopTimer(kernel_timer); // kernel_etime += sptElapsedTime(kernel_timer); // sptPrintElapsedTime(kernel_timer, "=Kernel Timer"); } // End loop kernels // sptStopTimer(loop_timer); // loop_etime += sptElapsedTime(loop_timer); // sptPrintElapsedTime(loop_timer, "=Loop Timer"); // printf("\nTotal Elementmat Time: %lf\n", elementmat_etime); // printf("Total Element Time: %lf\n", element_etime); // printf("Total Blockmat Time: %lf\n", blockmat_etime); // printf("Total Block Time: %lf\n", block_etime); // printf("Total Kernel Time: %lf\n", kernel_etime); // printf("Total Loop Time: %lf\n\n", loop_etime); // sptFreeTimer(loop_timer); // sptFreeTimer(kernel_timer); // sptFreeTimer(block_timer); // sptFreeTimer(element_timer); // sptFreeTimer(elementmat_timer); // sptFreeTimer(blockmat_timer); return 0; } int sptMTTKRPHiCOO_3D_MatrixTiling_init( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptElementIndex mode_i; sptElementIndex tmp_i_1, tmp_i_2; sptValue entry; sptValue * blocked_mvals; sptValue * blocked_times_mat_1; sptValue * blocked_times_mat_2; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { mode_i = hitsr->einds[mode].data[z]; tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; entry = vals[z]; for(sptIndex r=0; r<R; ++r) { blocked_mvals[mode_i * stride + r] += entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r]; } } // End loop entries } // End loop 
blocks } // End loop kernels return 0; }
striad.c
/*
 * =======================================================================================
 *
 *      Author:   Jan Eitzinger (je), jan.eitzinger@fau.de
 *      Copyright (c) 2020 RRZE, University Erlangen-Nuremberg
 *
 *      Permission is hereby granted, free of charge, to any person obtaining a copy
 *      of this software and associated documentation files (the "Software"), to deal
 *      in the Software without restriction, including without limitation the rights
 *      to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *      copies of the Software, and to permit persons to whom the Software is
 *      furnished to do so, subject to the following conditions:
 *
 *      The above copyright notice and this permission notice shall be included in all
 *      copies or substantial portions of the Software.
 *
 *      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *      IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *      OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *      SOFTWARE.
 *
 * =======================================================================================
 */
#include <timing.h>

/*
 * Schoenauer triad benchmark kernel: a[i] = b[i] + d[i] * c[i].
 * The loop is statically scheduled across OpenMP threads.
 * Returns the wall-clock time (seconds) spent in the kernel.
 */
double striad(
        double * restrict a,
        double * restrict b,
        double * restrict c,
        double * restrict d,
        int N
        )
{
    double start = getTimeStamp();

#pragma omp parallel for schedule(static)
    for (int i = 0; i < N; i++) {
        a[i] = b[i] + d[i] * c[i];
    }

    double end = getTimeStamp();
    return end - start;
}
brox_optic_flow.h
// This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.

#ifndef BROX_OPTIC_FLOW_H
#define BROX_OPTIC_FLOW_H

// Brox et al. variational optical flow (spatial method), OpenMP-parallelized.
// Equation numbers in the comments refer to the accompanying article.
// NOTE(review): sqrt/std::max_element are used but <cmath>/<algorithm> are not
// included directly — presumably pulled in via the project headers below; confirm.

#include <omp.h>

#include <vector>
#include <iostream>

#include "mask.h"
#include "zoom.h"
#include "bicubic_interpolation.h"

#define EPSILON 0.001          // regularization inside the robust sqrt
#define MAXITER 300            // SOR iteration cap
#define SOR_PARAMETER 1.9      // over-relaxation factor w
#define GAUSSIAN_SIGMA 0.8     // presmoothing sigma at the finest scale

/**
 *
 * Compute the coefficients of the robust functional (data term)
 *
 **/
void psi_data(
    const float *I1,    //first image
    const float *I2,    //second image
    const float *I2x,   //gradient of the second image
    const float *I2y,   //gradient of the second image
    const float *du,    //motion increment
    const float *dv,    //motion increment
    float *psip,        //output coefficients
    const int nx,       //image width
    const int ny        //image height
)
{
    const int size = nx * ny;

    //compute 1/(sqrt((I2-I1+I2x*du+I2y*dv)²+e²) in each pixel
    //(equation (5) in the article)
    #pragma omp parallel for
    for (int i = 0; i < size; i++)
    {
        const float dI  = I2[i] - I1[i] + I2x[i] * du[i] + I2y[i] * dv[i];
        const float dI2 = dI * dI;

        psip[i] = 1. / sqrt(dI2 + EPSILON * EPSILON);
    }
}

/**
 *
 * Compute the coefficients of the robust functional (gradient term)
 *
 **/
void psi_gradient(
    const float *I1x,   //gradient of the first image
    const float *I1y,   //gradient of the first image
    const float *I2x,   //gradient of the second image
    const float *I2y,   //gradient of the second image
    const float *I2xx,  //second derivatives of the second image
    const float *I2xy,  //second derivatives of the second image
    const float *I2yy,  //second derivatives of the second image
    const float *du,    //motion increment
    const float *dv,    //motion increment
    float *psip,        //output coefficients
    const int nx,       //image width
    const int ny        //image height
)
{
    const int size = nx * ny;

    //compute 1/(sqrt(|DI2-DI1+HI2*(du,dv)|²+e²) in each pixel
    //(equation (5) in the article)
    #pragma omp parallel for
    for (int i = 0; i < size; i++)
    {
        const float dIx = I2x[i] - I1x[i] + I2xx[i] * du[i] + I2xy[i] * dv[i];
        const float dIy = I2y[i] - I1y[i] + I2xy[i] * du[i] + I2yy[i] * dv[i];
        const float dI2 = dIx * dIx + dIy * dIy;

        psip[i] = 1. / sqrt(dI2 + EPSILON * EPSILON);
    }
}

/**
 *
 * Compute the coefficients of the robust functional (smoothness term)
 *
 **/
void psi_smooth(
    const float *ux,    //gradient of x component of the optical flow
    const float *uy,    //gradient of x component of the optical flow
    const float *vx,    //gradient of y component of the optical flow
    const float *vy,    //gradient of y component of the optical flow
    float *psi,         //output coefficients
    const int nx,       //image width
    const int ny        //image height
)
{
    const int size = nx * ny;

    //compute 1/(sqrt(ux²+uy²+vx²+vy²+e²) in each pixel
    //(equation (5) in the article)
    #pragma omp parallel for
    for (int i = 0; i < size; i++)
    {
        const float du = ux[i] * ux[i] + uy[i] * uy[i];
        const float dv = vx[i] * vx[i] + vy[i] * vy[i];
        const float d2 = du + dv;

        psi[i] = 1. / sqrt(d2 + EPSILON * EPSILON);
    }
}

/**
 *
 * SOR iteration in one position
 *
 * i0/i1 (resp. j0/j1) are the row (column) offsets toward the previous/next
 * neighbor; passing 0 at an image border reuses the center pixel (Neumann-style
 * boundary handling).
 */
inline float sor_iteration(
    const float *Au,    //constant part of the numerator of u
    const float *Av,    //constant part of the numerator of v
    const float *Du,    //denominator of u
    const float *Dv,    //denominator of v
    const float *D,     //constant part of the numerator
    float *du,          //x component of the motion increment
    float *dv,          //y component of the motion increment
    const float alpha,  //alpha smoothness parameter
    const float *psi1,  //coefficients of the divergence
    const float *psi2,
    const float *psi3,
    const float *psi4,
    const int i,        //current row
    const int i0,       //previous row
    const int i1,       //following row
    const int j,        //current column
    const int nx,       //number of columns
    const int j0,       //previous column
    const int j1        //following column
)
{
    //set the SOR extrapolation parameter
    const float w = SOR_PARAMETER;

    //calculate the position in the array
    const int k = i * nx + j;

    //compute the divergence part of the numerator (equation (10))
    const float div_du = psi1[k] * du[k + i1] + psi2[k] * du[k - i0] +
                         psi3[k] * du[k + j1] + psi4[k] * du[k - j0];
    const float div_dv = psi1[k] * dv[k + i1] + psi2[k] * dv[k - i0] +
                         psi3[k] * dv[k + j1] + psi4[k] * dv[k - j0];

    const float duk = du[k];
    const float dvk = dv[k];

    //update the motion increment (equation (12))
    //note: dv[k] reads the freshly updated du[k] — Gauss–Seidel-style coupling
    du[k] = (1. - w) * du[k] +
            w * (Au[k] - D[k] * dv[k] + alpha * div_du) / Du[k];

    dv[k] = (1. - w) * dv[k] +
            w * (Av[k] - D[k] * du[k] + alpha * div_dv) / Dv[k];

    //return the covergence error in this position (equation (13))
    return (du[k] - duk) * (du[k] - duk) + (dv[k] - dvk) * (dv[k] - dvk);
}

/**
 *
 * Compute the optic flow with the Brox spatial method
 * (single scale; the multiscale driver below calls this per pyramid level)
 *
 **/
void brox_optic_flow(
    const float *I1,          //first image
    const float *I2,          //second image
    float *u,                 //x component of the optical flow
    float *v,                 //y component of the optical flow
    const int nx,             //image width
    const int ny,             //image height
    const float alpha,        //smoothness parameter
    const float gamma,        //gradient term parameter
    const float TOL,          //stopping criterion threshold
    const int inner_iter,     //number of inner iterations
    const int outer_iter,     //number of outer iterations
    const int number_of_threads, // number of threads for the parallel code
    const bool verbose        //switch on messages
)
{
    const int size = nx * ny;

    //allocate memory (all buffers freed at the end of this function)
    float *du = new float[size];
    float *dv = new float[size];

    float *ux = new float[size];
    float *uy = new float[size];
    float *vx = new float[size];
    float *vy = new float[size];

    float *I1x = new float[size];
    float *I1y = new float[size];
    float *I2x = new float[size];
    float *I2y = new float[size];
    float *I2w = new float[size];
    float *I2wx = new float[size];
    float *I2wy = new float[size];
    float *I2xx = new float[size];
    float *I2yy = new float[size];
    float *I2xy = new float[size];
    float *I2wxx = new float[size];
    float *I2wyy = new float[size];
    float *I2wxy = new float[size];

    float *div_u = new float[size];
    float *div_v = new float[size];
    float *div_d = new float[size];

    float *Au = new float[size];
    float *Av = new float[size];
    float *Du = new float[size];
    float *Dv = new float[size];
    float *D  = new float[size];

    float *psid = new float[size];
    float *psig = new float[size];
    float *psis = new float[size];
    float *psi1 = new float[size];
    float *psi2 = new float[size];
    float *psi3 = new float[size];
    float *psi4 = new float[size];

    //compute the gradient of the images
    gradient(I1, I1x, I1y, nx, ny);
    gradient(I2, I2x, I2y, nx, ny);

    //compute second order derivatives
    Dxx(I2, I2xx, nx, ny);
    Dyy(I2, I2yy, nx, ny);
    Dxy(I2, I2xy, nx, ny);

    //outer iterations loop
    for (int no = 0; no < outer_iter; no++)
    {
        //warp the second image and its derivatives
        bicubic_interpolation(I2,   u, v, I2w,   nx, ny, true);
        bicubic_interpolation(I2x,  u, v, I2wx,  nx, ny, true);
        bicubic_interpolation(I2y,  u, v, I2wy,  nx, ny, true);
        bicubic_interpolation(I2xx, u, v, I2wxx, nx, ny, true);
        bicubic_interpolation(I2xy, u, v, I2wxy, nx, ny, true);
        bicubic_interpolation(I2yy, u, v, I2wyy, nx, ny, true);

        //compute the flow gradient
        gradient(u, ux, uy, nx, ny);
        gradient(v, vx, vy, nx, ny);

        //compute robust function Psi for the smoothness term
        psi_smooth(ux, uy, vx, vy, psis, nx, ny);

        //compute coefficients of Psi functions in divergence
        psi_divergence(psis, psi1, psi2, psi3, psi4, nx, ny);

        //compute the divergence for the gradient of w (equation (8))
        divergence_u(u, v, psi1, psi2, psi3, psi4, div_u, div_v, nx, ny);

        #pragma omp parallel for
        for (int i = 0; i < size; i++)
        {
            //compute the coefficents of dw[i] in the smoothness term
            //(equation (10))
            div_d[i] = alpha * (psi1[i] + psi2[i] + psi3[i] + psi4[i]);

            //initialize the motion increment
            du[i] = dv[i] = 0;
        }

        //inner iterations loop
        for (int ni = 0; ni < inner_iter; ni++)
        {
            //compute robust function Psi for the data and gradient terms
            psi_data(I1, I2w, I2wx, I2wy, du, dv, psid, nx, ny);
            psi_gradient(I1x, I1y, I2wx, I2wy, I2wxx, I2wxy, I2wyy,
                         du, dv, psig, nx, ny);

            //store constant parts of the numerical scheme (equation (11))
            for (int i = 0; i < size; i++)
            {
                const float p = psid[i];
                const float g = gamma * psig[i];

                //brightness constancy term
                const float dif = I2w[i] - I1[i];
                const float BNu = -p * dif * I2wx[i];
                const float BNv = -p * dif * I2wy[i];
                const float BDu =  p * I2wx[i] * I2wx[i];
                const float BDv =  p * I2wy[i] * I2wy[i];

                //gradient constancy term
                const float dx  = (I2wx[i] - I1x[i]);
                const float dy  = (I2wy[i] - I1y[i]);
                const float GNu = -g * (dx * I2wxx[i] + dy * I2wxy[i]);
                const float GNv = -g * (dx * I2wxy[i] + dy * I2wyy[i]);
                const float GDu =  g * (I2wxx[i] * I2wxx[i] + I2wxy[i] * I2wxy[i]);
                const float GDv =  g * (I2wyy[i] * I2wyy[i] + I2wxy[i] * I2wxy[i]);
                const float DI  = (I2wxx[i] + I2wyy[i]) * I2wxy[i];
                const float Duv =  p * I2wy[i] * I2wx[i] + g * DI;

                Au[i] = BNu + GNu + alpha * div_u[i];
                Av[i] = BNv + GNv + alpha * div_v[i];
                Du[i] = BDu + GDu + div_d[i];
                Dv[i] = BDv + GDv + div_d[i];
                D[i]  = Duv;
            }

            //sor iterations loop
            float error = 1000;
            int   nsor  = 0;

            while (error > TOL && nsor < MAXITER)
            {
                error = 0;
                nsor++;

                //update the motion increment in the center of the images
                //NOTE(review): the thread count is clamped to ny-3 so no
                //thread is left without rows — confirm intent for tiny images
                #pragma omp parallel for reduction(+:error) num_threads((number_of_threads < (ny-3)) ? number_of_threads : (ny-3))
                for (int i = 1; i < ny - 1; i++)
                {
                    for (int j = 1; j < nx - 1; j++)
                        error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                               alpha, psi1, psi2, psi3, psi4,
                                               i, nx, nx, j, nx, 1, 1);
                }

                //update the motion increment in the first and last rows
                for (int j = 1; j < nx - 1; j++)
                {
                    error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                           alpha, psi1, psi2, psi3, psi4,
                                           0, 0, nx, j, nx, 1, 1);

                    error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                           alpha, psi1, psi2, psi3, psi4,
                                           ny - 1, nx, 0, j, nx, 1, 1);
                }

                //update the motion increment in the first and last columns
                for (int i = 1; i < ny - 1; i++)
                {
                    error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                           alpha, psi1, psi2, psi3, psi4,
                                           i, nx, nx, 0, nx, 0, 1);

                    error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                           alpha, psi1, psi2, psi3, psi4,
                                           i, nx, nx, nx - 1, nx, 1, 0);
                }

                //process the top-left corner (0,0)
                error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                       alpha, psi1, psi2, psi3, psi4,
                                       0, 0, nx, 0, nx, 0, 1);

                //process the top-right corner (0,nx-1)
                error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                       alpha, psi1, psi2, psi3, psi4,
                                       0, 0, nx, nx - 1, nx, 1, 0);

                //process the bottom-left corner (ny-1,0)
                error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                       alpha, psi1, psi2, psi3, psi4,
                                       ny - 1, nx, 0, 0, nx, 0, 1);

                //process the bottom-right corner (ny-1,nx-1)
                error += sor_iteration(Au, Av, Du, Dv, D, du, dv,
                                       alpha, psi1, psi2, psi3, psi4,
                                       ny - 1, nx, 0, nx - 1, nx, 1, 0);

                error = sqrt(error / size);
            }

            if (verbose)
                std::cout << "Iterations: " << nsor << std::endl;
        }

        //update the flow with the estimated motion increment
        for (int i = 0; i < size; i++)
        {
            u[i] += du[i];
            v[i] += dv[i];
        }
    }

    //delete allocated memory
    delete[]du;
    delete[]dv;

    delete[]ux;
    delete[]uy;
    delete[]vx;
    delete[]vy;

    delete[]I1x;
    delete[]I1y;
    delete[]I2x;
    delete[]I2y;
    delete[]I2w;
    delete[]I2wx;
    delete[]I2wy;
    delete[]I2xx;
    delete[]I2yy;
    delete[]I2xy;
    delete[]I2wxx;
    delete[]I2wyy;
    delete[]I2wxy;

    delete[]div_u;
    delete[]div_v;
    delete[]div_d;

    delete[]Au;
    delete[]Av;
    delete[]Du;
    delete[]Dv;
    delete[]D;

    delete[]psid;
    delete[]psig;
    delete[]psis;
    delete[]psi1;
    delete[]psi2;
    delete[]psi3;
    delete[]psi4;
}

/**
 *
 * Function to normalize the images between 0 and 255
 *
 **/
void image_normalization(
    const float *I1,    //input image 1
    const float *I2,    //input image 2
    float *I1n,         //normalized output image 1
    float *I2n,         //normalized output image 2
    int size            //size of the image
)
{
    //compute the max and min values of the images
    const float max0 = *std::max_element(I1, &I1[size]);
    const float max1 = *std::max_element(I2, &I2[size]);
    const float min0 = *std::min_element(I1, &I1[size]);
    const float min1 = *std::min_element(I2, &I2[size]);

    //compute the global max and min (shared so both images keep relative scale)
    const float max = std::max(max0, max1);
    const float min = std::min(min0, min1);
    const float den = max - min;

    if (den > 0)
        //normalize the images between 0 and 255
        #pragma omp parallel for
        for (int i = 0; i < size; i++)
        {
            I1n[i] = 255.0 * (I1[i] - min) / den;
            I2n[i] = 255.0 * (I2[i] - min) / den;
        }
    else
        //copy the original data (constant images: nothing to normalize)
        #pragma omp parallel for
        for (int i = 0; i < size; i++)
        {
            I1n[i] = I1[i];
            I2n[i] = I2[i];
        }
}

/**
 *
 * Multiscale approach for computing the optical flow
 * (coarse-to-fine pyramid; flow is upsampled and rescaled between levels)
 *
 **/
void brox_optic_flow(
    const float *I1,        //first image
    const float *I2,        //second image
    float *u,               //x component of the optical flow
    float *v,               //y component of the optical flow
    const int nxx,          //image width
    const int nyy,          //image height
    const float alpha,      //smoothness parameter
    const float gamma,      //gradient term parameter
    const int nscales,      //number of scales
    const float nu,         //downsampling factor
    const float TOL,        //stopping criterion threshold
    const int inner_iter,   //number of inner iterations
    const int outer_iter,   //number of outer iterations
    const bool verbose      //switch on messages
)
{
    int size = nxx * nyy;

    std::vector<float *> I1s(nscales);
    std::vector<float *> I2s(nscales);
    std::vector<float *> us(nscales);
    std::vector<float *> vs(nscales);
    std::vector<int> nx(nscales);
    std::vector<int> ny(nscales);

    I1s[0] = new float[size];
    I2s[0] = new float[size];

    //normalize the input images between 0 and 255
    image_normalization(I1, I2, I1s[0], I2s[0], size);

    //presmoothing the finest scale images
    gaussian(I1s[0], nxx, nyy, GAUSSIAN_SIGMA);
    gaussian(I2s[0], nxx, nyy, GAUSSIAN_SIGMA);

    //finest level aliases the caller's output buffers
    us[0] = u;
    vs[0] = v;
    nx[0] = nxx;
    ny[0] = nyy;

    //create the scales
    for (int s = 1; s < nscales; s++)
    {
        zoom_size(nx[s - 1], ny[s - 1], nx[s], ny[s], nu);

        const int sizes = nx[s] * ny[s];

        I1s[s] = new float[sizes];
        I2s[s] = new float[sizes];
        us[s] = new float[sizes];
        vs[s] = new float[sizes];

        //compute the zoom from the previous scale
        zoom_out(I1s[s - 1], I1s[s], nx[s - 1], ny[s - 1], nu);
        zoom_out(I2s[s - 1], I2s[s], nx[s - 1], ny[s - 1], nu);
    }

    //initialization of the optical flow at the coarsest scale
    for (int i = 0; i < nx[nscales - 1] * ny[nscales - 1]; i++)
        us[nscales - 1][i] = vs[nscales - 1][i] = 0.0;

    //count the available OpenMP threads (one increment per team member)
    int number_of_threads = 0;
    #pragma omp parallel reduction(+:number_of_threads)
    number_of_threads += 1;

    //pyramidal approach for computing the optical flow
    for (int s = nscales - 1; s >= 0; s--)
    {
        if (verbose)
            std::cout << "Scale: " << s << std::endl;

        //compute the optical flow for the current scale
        brox_optic_flow(I1s[s], I2s[s], us[s], vs[s], nx[s], ny[s],
                        alpha, gamma, TOL, inner_iter, outer_iter,
                        number_of_threads, verbose);

        //if it is not the finer scale, then upsample the optical flow and
        //adapt it conveniently
        if (s)
        {
            zoom_in(us[s], us[s - 1], nx[s], ny[s], nx[s - 1], ny[s - 1]);
            zoom_in(vs[s], vs[s - 1], nx[s], ny[s], nx[s - 1], ny[s - 1]);

            //rescale the flow magnitudes for the finer grid
            for (int i = 0; i < nx[s - 1] * ny[s - 1]; i++)
            {
                us[s - 1][i] *= 1.0 / nu;
                vs[s - 1][i] *= 1.0 / nu;
            }
        }
    }

    //delete allocated memory (level 0 of us/vs belongs to the caller)
    delete[]I1s[0];
    delete[]I2s[0];
    for (int i = 1; i < nscales; i++)
    {
        delete[]I1s[i];
        delete[]I2s[i];
        delete[]us[i];
        delete[]vs[i];
    }
}

#endif
libperf.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017-2020. ALL RIGHTS RESERVED. * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <ucs/debug/log.h> #include <ucs/arch/bitops.h> #include <ucs/sys/module.h> #include <ucs/sys/string.h> #include <string.h> #include <tools/perf/lib/libperf_int.h> #include <unistd.h> #if _OPENMP #include <omp.h> #endif /* _OPENMP */ #define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \ _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \ if (_status != UCS_OK) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \ "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[_op], (_size)); \ return _status; \ } #define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \ if (!ucs_test_all_flags(_attr, _required)) { \ if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \ #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[ucs_ffs64(~(_attr) & (_required))]); \ } \ return UCS_ERR_UNSUPPORTED; \ } typedef struct { union { struct { size_t dev_addr_len; size_t iface_addr_len; size_t ep_addr_len; } uct; struct { size_t worker_addr_len; size_t total_wireup_len; } ucp; }; size_t rkey_size; unsigned long recv_buffer; } ucx_perf_ep_info_t; const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST]; static const char *perf_iface_ops[] = { [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short", [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy", [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short", [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy", 
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short", [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy", [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep", [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability", [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback", [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback", [ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy", [ucs_ilog2(UCT_IFACE_FLAG_EP_CHECK)] = "ep check", [ucs_ilog2(UCT_IFACE_FLAG_EP_KEEPALIVE)] = "ep keepalive" }; static const char *perf_atomic_op[] = { [UCT_ATOMIC_OP_ADD] = "add", [UCT_ATOMIC_OP_AND] = "and", [UCT_ATOMIC_OP_OR] = "or" , [UCT_ATOMIC_OP_XOR] = "xor" }; static const char *perf_atomic_fop[] = { [UCT_ATOMIC_OP_ADD] = "fetch-add", [UCT_ATOMIC_OP_AND] = "fetch-and", [UCT_ATOMIC_OP_OR] = "fetch-or", [UCT_ATOMIC_OP_XOR] = "fetch-xor", [UCT_ATOMIC_OP_SWAP] = "swap", [UCT_ATOMIC_OP_CSWAP] = "cswap" }; /* * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. 
*/
/* Return the median of arr[0..n-1] in O(n) average time using Quickselect.
 * NOTE(review): this routine partially reorders arr[] in place while
 * partitioning — callers must not rely on the original ordering afterwards. */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;

/* classic in-place swap; 'register' kept from the original public-domain code */
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;

        if (high == low + 1) {  /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }

        /* Find median of low, middle and high items; swap into position low */
        middle = (low + high) / 2;
        if (arr[middle] > arr[high])    ELEM_SWAP(arr[middle], arr[high]) ;
        if (arr[low] > arr[high])       ELEM_SWAP(arr[low], arr[high]) ;
        if (arr[middle] > arr[low])     ELEM_SWAP(arr[middle], arr[low]) ;

        /* Swap low item (now in position middle) into position (low+1) */
        ELEM_SWAP(arr[middle], arr[low+1]) ;

        /* Nibble from each end towards middle, swapping items when stuck */
        ll = low + 1;
        hh = high;
        for (;;) {
            do ll++; while (arr[low] > arr[ll]) ;
            do hh--; while (arr[hh] > arr[low]) ;

            if (hh < ll)
                break;

            ELEM_SWAP(arr[ll], arr[hh]) ;
        }

        /* Swap middle item (in position low) back into correct position */
        ELEM_SWAP(arr[low], arr[hh]) ;

        /* Re-set active partition */
        if (hh <= median)
            low = ll;
        if (hh >= median)
            high = hh - 1;
    }
}

/* Allocate 'length' bytes of host memory through the UCT interface allocator.
 * On success the allocation is described by *alloc_mem; asserts that the
 * memory was allocated on the md this perf context was opened with. */
static ucs_status_t uct_perf_test_alloc_host(const ucx_perf_context_t *perf,
                                             size_t length, unsigned flags,
                                             uct_allocated_memory_t *alloc_mem)
{
    ucs_status_t status;

    status = uct_iface_mem_alloc(perf->uct.iface, length, flags,
                                 "perftest", alloc_mem);
    if (status != UCS_OK) {
        ucs_error("failed to allocate memory: %s", ucs_status_string(status));
        return status;
    }

    ucs_assert(alloc_mem->md == perf->uct.md);

    return UCS_OK;
}

/* Release memory previously obtained from uct_perf_test_alloc_host() */
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
                                    uct_allocated_memory_t *alloc_mem)
{
    uct_iface_mem_free(alloc_mem);
}

/* memcpy between perf buffers; valid only for HOST<->HOST memory types,
 * any other combination is logged as an error and the copy is skipped */
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
                                      const void *src, ucs_memory_type_t src_mem_type,
                                      size_t count)
{
    if ((dst_mem_type !=
UCS_MEMORY_TYPE_HOST) || (src_mem_type != UCS_MEMORY_TYPE_HOST)) { ucs_error("wrong memory type passed src - %d, dst - %d", src_mem_type, dst_mem_type); } else { memcpy(dst, src, count); } } static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; unsigned flags; size_t buffer_size; if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* TODO use params->alignment */ flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ? UCT_MD_MEM_FLAG_NONBLOCK : 0; flags |= UCT_MD_MEM_ACCESS_ALL; /* Allocate send buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.send_mem); if (status != UCS_OK) { goto err; } perf->send_buffer = perf->uct.send_mem.address; /* Allocate receive buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.recv_mem); if (status != UCS_OK) { goto err_free_send; } perf->recv_buffer = perf->uct.recv_mem.address; /* Allocate IOV datatype memory */ perf->params.msg_size_cnt = params->msg_size_cnt; perf->uct.iov = malloc(sizeof(*perf->uct.iov) * perf->params.msg_size_cnt * params->thread_count); if (NULL == perf->uct.iov) { status = UCS_ERR_NO_MEMORY; ucs_error("Failed allocate send IOV(%lu) buffer: %s", perf->params.msg_size_cnt, ucs_status_string(status)); goto err_free_recv; } ucs_debug("allocated memory. 
Send buffer %p, Recv buffer %p", perf->send_buffer, perf->recv_buffer); return UCS_OK; err_free_recv: perf->allocator->uct_free(perf, &perf->uct.recv_mem); err_free_send: perf->allocator->uct_free(perf, &perf->uct.send_mem); err: return status; } static void uct_perf_test_free_mem(ucx_perf_context_t *perf) { perf->allocator->uct_free(perf, &perf->uct.send_mem); perf->allocator->uct_free(perf, &perf->uct.recv_mem); free(perf->uct.iov); } void ucx_perf_test_start_clock(ucx_perf_context_t *perf) { ucs_time_t start_time = ucs_get_time(); perf->start_time_acc = ucs_get_accurate_time(); perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX : ucs_time_from_sec(perf->params.max_time) + start_time; perf->prev_time = start_time; perf->prev.time = start_time; perf->prev.time_acc = perf->start_time_acc; perf->current.time_acc = perf->start_time_acc; } /* Initialize/reset all parameters that could be modified by the warm-up run */ static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned i; perf->max_iter = (perf->params.max_iter == 0) ? 
UINT64_MAX : perf->params.max_iter; perf->report_interval = ucs_time_from_sec(perf->params.report_interval); perf->current.time = 0; perf->current.msgs = 0; perf->current.bytes = 0; perf->current.iters = 0; perf->prev.msgs = 0; perf->prev.bytes = 0; perf->prev.iters = 0; perf->timing_queue_head = 0; for (i = 0; i < TIMING_QUEUE_SIZE; ++i) { perf->timing_queue[i] = 0; } ucx_perf_test_start_clock(perf); } static void ucx_perf_test_init(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned group_index; perf->params = *params; group_index = rte_call(perf, group_index); if (0 == group_index) { perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type]; } else { perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type]; } ucx_perf_test_prepare_new_run(perf, params); } void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result) { ucs_time_t median; double factor; if ((perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) || (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG_WAIT_MEM)) { factor = 2.0; } else { factor = 1.0; } result->iters = perf->current.iters; result->bytes = perf->current.bytes; result->elapsed_time = perf->current.time_acc - perf->start_time_acc; /* Latency */ median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE); result->latency.typical = ucs_time_to_sec(median) / factor; result->latency.moment_average = (perf->current.time_acc - perf->prev.time_acc) / (perf->current.iters - perf->prev.iters) / factor; result->latency.total_average = (perf->current.time_acc - perf->start_time_acc) / perf->current.iters / factor; /* Bandwidth */ result->bandwidth.typical = 0.0; // Undefined result->bandwidth.moment_average = (perf->current.bytes - perf->prev.bytes) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->bandwidth.total_average = perf->current.bytes / (perf->current.time_acc - perf->start_time_acc) * factor; /* Packet rate */ result->msgrate.typical = 0.0; // 
Undefined result->msgrate.moment_average = (perf->current.msgs - perf->prev.msgs) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->msgrate.total_average = perf->current.msgs / (perf->current.time_acc - perf->start_time_acc) * factor; } static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params) { size_t it; /* check if zero-size messages are requested and supported */ if ((/* they are not supported by: */ /* - UCT tests, except UCT AM Short/Bcopy */ (params->api == UCX_PERF_API_UCT) || (/* - UCP RMA and AMO tests */ (params->api == UCX_PERF_API_UCP) && (params->command != UCX_PERF_CMD_AM) && (params->command != UCX_PERF_CMD_TAG) && (params->command != UCX_PERF_CMD_TAG_SYNC) && (params->command != UCX_PERF_CMD_STREAM))) && ucx_perf_get_message_size(params) < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size too small, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } if ((params->api == UCX_PERF_API_UCP) && ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) || (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) && ((params->command == UCX_PERF_CMD_PUT) || (params->command == UCX_PERF_CMD_GET) || (params->command == UCX_PERF_CMD_ADD) || (params->command == UCX_PERF_CMD_FADD) || (params->command == UCX_PERF_CMD_SWAP) || (params->command == UCX_PERF_CMD_CSWAP))) { /* TODO: remove when support for non-HOST memory types will be added */ if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types", ucs_memory_type_names[params->send_mem_type], ucs_memory_type_names[params->recv_mem_type]); } return UCS_ERR_INVALID_PARAM; } if (params->max_outstanding < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("max_outstanding, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } /* check if particular message size fit into stride size */ if (params->iov_stride) { for (it = 0; it < params->msg_size_cnt; ++it) { if 
(params->msg_size_list[it] > params->iov_stride) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Buffer size %lu bigger than stride %lu", params->msg_size_list[it], params->iov_stride); } return UCS_ERR_INVALID_PARAM; } } } return UCS_OK; } void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index) { uct_ep_h ep = perf->uct.peers[peer_index].ep; uct_completion_t comp; ucs_status_t status; int started; started = 0; comp.func = NULL; comp.count = 2; do { if (!started) { status = uct_ep_flush(ep, 0, &comp); if (status == UCS_OK) { --comp.count; } else if (status == UCS_INPROGRESS) { started = 1; } else if (status != UCS_ERR_NO_RESOURCE) { ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status)); return; } } uct_worker_progress(perf->uct.worker); } while (comp.count > 1); } void uct_perf_iface_flush_b(ucx_perf_context_t *perf) { ucs_status_t status; do { status = uct_iface_flush(perf->uct.iface, 0, NULL); uct_worker_progress(perf->uct.worker); } while (status == UCS_INPROGRESS); if (status != UCS_OK) { ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status)); } } static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f, uint64_t bcopy_f, uint64_t zcopy_f) { return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) || (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_f : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f : 0; } static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32, uint64_t *op64, uint64_t op) { if (size == sizeof(uint32_t)) { *op32 = UCS_BIT(op); return UCS_OK; } else if (size == sizeof(uint64_t)) { *op64 = UCS_BIT(op); return UCS_OK; } return UCS_ERR_UNSUPPORTED; } static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m, size_t bcopy_m, uint64_t zcopy_m) { return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) || (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_m : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? 
bcopy_m : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m : 0; } static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params, ucs_memory_type_t mem_type, uct_md_attr_t *md_attr) { if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) && !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT, ucs_memory_type_names[mem_type], UCT_PERF_TEST_PARAMS_ARG(params)); return UCS_ERR_INVALID_PARAM; } } return UCS_OK; } static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params, uct_iface_h iface, uct_md_h md) { uint64_t required_flags = 0; uint64_t atomic_op32 = 0; uint64_t atomic_op64 = 0; uint64_t atomic_fop32 = 0; uint64_t atomic_fop64 = 0; uct_md_attr_t md_attr; uct_iface_attr_t attr; ucs_status_t status; size_t min_size, max_size, max_iov, message_size; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("uct_md_query(%s) failed: %s", params->uct.md_name, ucs_status_string(status)); return status; } status = uct_iface_query(iface, &attr); if (status != UCS_OK) { ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s", UCT_PERF_TEST_PARAMS_ARG(params), ucs_status_string(status)); return status; } min_size = 0; max_iov = 1; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_AM: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT, UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY); required_flags |= UCT_IFACE_FLAG_CB_SYNC; min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.am.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short, attr.cap.am.max_bcopy, attr.cap.am.max_zcopy); max_iov = attr.cap.am.max_iov; break; case UCX_PERF_CMD_PUT: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT, UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY); min_size = 
__get_max_size(params->uct.data_layout, 0, 0, attr.cap.put.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short, attr.cap.put.max_bcopy, attr.cap.put.max_zcopy); max_iov = attr.cap.put.max_iov; break; case UCX_PERF_CMD_GET: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT, UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.get.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short, attr.cap.get.max_bcopy, attr.cap.get.max_zcopy); max_iov = attr.cap.get.max_iov; break; case UCX_PERF_CMD_ADD: ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD, perf_atomic_op, params, status); max_size = 8; break; case UCX_PERF_CMD_FADD: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_SWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_CSWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP, perf_atomic_fop, params, status); max_size = 8; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } /* check atomics first */ ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op); ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op); ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop); ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop); /* check iface flags */ if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) && (!ucs_test_all_flags(attr.cap.flags, 
required_flags) || !required_flags)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s", UCT_PERF_TEST_PARAMS_ARG(params), perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]); } return UCS_ERR_UNSUPPORTED; } if (message_size < min_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is smaller than min supported (%zu)", message_size, min_size); } return UCS_ERR_UNSUPPORTED; } if (message_size > max_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is larger than max supported (%zu)", message_size, max_size); } return UCS_ERR_UNSUPPORTED; } if (params->command == UCX_PERF_CMD_AM) { if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) && (params->uct.am_hdr_size != sizeof(uint64_t))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Short AM header size must be 8 bytes"); } return UCS_ERR_INVALID_PARAM; } if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) && (params->uct.am_hdr_size > attr.cap.am.max_hdr)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than max supported " "(%zu)", params->uct.am_hdr_size, attr.cap.am.max_hdr); } return UCS_ERR_UNSUPPORTED; } if (params->uct.am_hdr_size > message_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than message size " "(%zu)", params->uct.am_hdr_size, message_size); } return UCS_ERR_INVALID_PARAM; } if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM flow-control window (%d) too large (should be <= %d)", params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) && (params->flags & UCX_PERF_TEST_FLAG_VERBOSE)) { ucs_warn("Running active-message test with on-sided progress"); } } if 
((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) || (UCT_PERF_DATA_LAYOUT_SHORT_IOV == params->uct.data_layout)) { if (params->msg_size_cnt > max_iov) { if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) || !params->msg_size_cnt) { ucs_error("Wrong number of IOV entries. Requested is %lu, " "should be in the range 1...%lu", params->msg_size_cnt, max_iov); } return UCS_ERR_UNSUPPORTED; } /* if msg_size_cnt == 1 the message size checked above */ if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && (UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) { if (params->uct.am_hdr_size > params->msg_size_list[0]) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%lu) larger than the first IOV " "message size (%lu)", params->uct.am_hdr_size, params->msg_size_list[0]); } return UCS_ERR_INVALID_PARAM; } } } status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr); if (status != UCS_OK) { return status; } status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = ADDR_BUF_SIZE; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; uct_ep_params_t ep_params; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", 
ucs_status_string(status)); goto err_free; } if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len); ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <= UCS_PTR_BYTE_OFFSET(buffer, buffer_size)); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { memset(rkey_buffer, 0, info.rkey_size); status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE; ep_params.iface = perf->uct.iface; if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if 
(status != UCS_OK) { ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR | UCT_EP_PARAM_FIELD_IFACE_ADDR; } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len); perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? 
iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.dev_addr = dev_addr; ep_params.iface_addr = iface_addr; status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); uct_perf_barrier(perf); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; } static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size, group_index, i; uct_perf_barrier(perf); uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0); group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); for (i = 0; i < group_size; ++i) { if (i != group_index) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep) { uct_ep_destroy(perf->uct.peers[i].ep); } } } free(perf->uct.peers); } static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params, ucp_params_t 
*ucp_params) { ucs_status_t status; size_t message_size; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_PUT: case UCX_PERF_CMD_GET: ucp_params->features |= UCP_FEATURE_RMA; break; case UCX_PERF_CMD_ADD: case UCX_PERF_CMD_FADD: case UCX_PERF_CMD_SWAP: case UCX_PERF_CMD_CSWAP: if (message_size == sizeof(uint32_t)) { ucp_params->features |= UCP_FEATURE_AMO32; } else if (message_size == sizeof(uint64_t)) { ucp_params->features |= UCP_FEATURE_AMO64; } else { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Atomic size should be either 32 or 64 bit"); } return UCS_ERR_INVALID_PARAM; } break; case UCX_PERF_CMD_TAG: case UCX_PERF_CMD_TAG_SYNC: ucp_params->features |= UCP_FEATURE_TAG; break; case UCX_PERF_CMD_STREAM: ucp_params->features |= UCP_FEATURE_STREAM; break; case UCX_PERF_CMD_AM: ucp_params->features |= UCP_FEATURE_AM; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_WAKEUP) || (params->wait_mode == UCX_PERF_WAIT_MODE_SLEEP)) { ucp_params->features |= UCP_FEATURE_WAKEUP; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype, size_t iovcnt, unsigned thread_count, ucp_dt_iov_t **iov_p) { ucp_dt_iov_t *iov; if (UCP_PERF_DATATYPE_IOV == datatype) { iov = malloc(sizeof(*iov) * iovcnt * thread_count); if (NULL == iov) { ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt); return UCS_ERR_NO_MEMORY; } *iov_p = iov; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, void **address_p, ucp_mem_h *memh, int non_blk_flag) { ucp_mem_map_params_t mem_map_params; ucp_mem_attr_t mem_attr; ucs_status_t status; mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH | 
UCP_MEM_MAP_PARAM_FIELD_FLAGS; mem_map_params.address = *address_p; mem_map_params.length = length; mem_map_params.flags = UCP_MEM_MAP_ALLOCATE; if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) { mem_map_params.flags |= non_blk_flag; } status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh); if (status != UCS_OK) { goto err; } mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS; status = ucp_mem_query(*memh, &mem_attr); if (status != UCS_OK) { goto err; } *address_p = mem_attr.address; return UCS_OK; err: return status; } static void ucp_perf_test_free_host(const ucx_perf_context_t *perf, void *address, ucp_mem_h memh) { ucs_status_t status; status = ucp_mem_unmap(perf->ucp.context, memh); if (status != UCS_OK) { ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status)); } } static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; size_t buffer_size; if (params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* Allocate send buffer memory */ perf->send_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->send_buffer, &perf->ucp.send_memh, UCP_MEM_MAP_NONBLOCK); if (status != UCS_OK) { goto err; } /* Allocate receive buffer memory */ perf->recv_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->recv_buffer, &perf->ucp.recv_memh, 0); if (status != UCS_OK) { goto err_free_send_buffer; } /* Allocate AM header */ if (params->ucp.am_hdr_size != 0) { perf->ucp.am_hdr = malloc(params->ucp.am_hdr_size); if (perf->ucp.am_hdr == NULL) { goto err_free_buffers; } } else { perf->ucp.am_hdr = NULL; } /* Allocate IOV datatype memory */ perf->ucp.send_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.send_iov); if (UCS_OK 
!= status) { goto err_free_am_hdr; } perf->ucp.recv_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.recv_iov); if (UCS_OK != status) { goto err_free_send_iov_buffers; } return UCS_OK; err_free_send_iov_buffers: free(perf->ucp.send_iov); err_free_am_hdr: free(perf->ucp.am_hdr); err_free_buffers: perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); err_free_send_buffer: perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); err: return UCS_ERR_NO_MEMORY; } static void ucp_perf_test_free_mem(ucx_perf_context_t *perf) { free(perf->ucp.recv_iov); free(perf->ucp.send_iov); free(perf->ucp.am_hdr); perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); } static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf) { unsigned i, thread_count = perf->params.thread_count; ucs_status_ptr_t *req; ucs_status_t status; for (i = 0; i < thread_count; ++i) { if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) { ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey); } if (perf->ucp.tctx[i].perf.ucp.ep != NULL) { req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep, UCP_EP_CLOSE_MODE_FLUSH); if (UCS_PTR_IS_PTR(req)) { do { ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker); status = ucp_request_check_status(req); } while (status == UCS_INPROGRESS); ucp_request_release(req); } else if (UCS_PTR_STATUS(req) != UCS_OK) { ucs_warn("failed to close ep %p on thread %d: %s\n", perf->ucp.tctx[i].perf.ucp.ep, i, ucs_status_string(UCS_PTR_STATUS(req))); } } } } static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf, ucs_status_t status) { unsigned group_size = rte_call(perf, group_size); ucs_status_t collective_status = status; struct iovec vec; void *req = NULL; unsigned i; vec.iov_base = &status; vec.iov_len = sizeof(status); rte_call(perf, post_vec, &vec, 1, &req); 
rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { rte_call(perf, recv, i, &status, sizeof(status), req); if (status != UCS_OK) { collective_status = status; } } return collective_status; } static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf) { unsigned thread_count = perf->params.thread_count; void *rkey_buffer = NULL; void *req = NULL; unsigned group_size, group_index, i; ucx_perf_ep_info_t *remote_info; ucp_ep_params_t ep_params; ucp_address_t *address; ucs_status_t status; size_t buffer_size; void *buffer; group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); if (group_size != 2) { ucs_error("perftest requires group size to be exactly 2 " "(actual group size: %u)", group_size); return UCS_ERR_UNSUPPORTED; } buffer_size = ADDR_BUF_SIZE * thread_count; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("failed to allocate RTE receive buffer"); status = UCS_ERR_NO_MEMORY; goto err; } /* Initialize all endpoints and rkeys to NULL to handle error flow */ for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].perf.ucp.ep = NULL; perf->ucp.tctx[i].perf.ucp.rkey = NULL; } /* receive the data from the remote peer, extract the address from it * (along with additional wireup info) and create an endpoint to the peer */ rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req); remote_info = buffer; for (i = 0; i < thread_count; i++) { address = (ucp_address_t*)(remote_info + 1); rkey_buffer = UCS_PTR_BYTE_OFFSET(address, remote_info->ucp.worker_addr_len); perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer; ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ep_params.address = address; status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params, &perf->ucp.tctx[i].perf.ucp.ep); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } 
        /* unpack the peer's rkey (RMA/AMO tests); rkey_size == 0 means the
         * peer did not pack one */
        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep,
                                        rkey_buffer,
                                        &perf->ucp.tctx[i].perf.ucp.rkey);
            if (status != UCS_OK) {
                /* NOTE(review): ucs_fatal aborts the process, so the goto
                 * below is only reached in non-verbose mode -- asymmetric
                 * with the ucs_error used for ucp_ep_create above; verify
                 * that this is intentional */
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s",
                              ucs_status_string(status));
                }
                goto err_free_eps_buffer;
            }
        } else {
            perf->ucp.tctx[i].perf.ucp.rkey = NULL;
        }

        /* advance to the next thread's blob */
        remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
                                          remote_info->ucp.total_wireup_len);
    }

    free(buffer);
    return UCS_OK;

err_free_eps_buffer:
    ucp_perf_test_destroy_eps(perf);
    free(buffer);
err:
    return status;
}

/*
 * Pack this process's per-thread wireup data ({ep_info, worker address,
 * packed rkey} per thread) into an iovec table and send it to the remote peer
 * through the RTE.  The rkey is packed only when RMA/AMO features are
 * requested.  (Function continues on the following lines.)
 */
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    unsigned i, j, thread_count = perf->params.thread_count;
    size_t address_length = 0;
    void *rkey_buffer = NULL;
    void *req = NULL;
    ucx_perf_ep_info_t *info;
    ucp_address_t *address;
    ucs_status_t status;
    struct iovec *vec;
    size_t rkey_size;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s",
                          ucs_status_string(status));
            }
            goto err;
        }
    } else {
        rkey_size = 0;
    }

    /* each thread has an iovec with 3 entries to send to the remote peer:
     * ep_info, worker_address and rkey buffer */
    vec = calloc(3 * thread_count, sizeof(struct iovec));
    if (vec == NULL) {
        ucs_error("failed to allocate iovec");
        status = UCS_ERR_NO_MEMORY;
        goto err_rkey_release;
    }

    /* get the worker address created for every thread and send it to the remote
     * peer */
    for (i = 0; i < thread_count; i++) {
        status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
                                        &address, &address_length);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_worker_get_address() failed: %s",
                          ucs_status_string(status));
            }
            goto err_free_workers_vec;
        }

        vec[i * 3].iov_base = malloc(sizeof(*info));
        if (vec[i
* 3].iov_base == NULL) { ucs_error("failed to allocate vec entry for info"); status = UCS_ERR_NO_MEMORY; ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); goto err_free_workers_vec; } info = vec[i * 3].iov_base; info->ucp.worker_addr_len = address_length; info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size; info->rkey_size = rkey_size; info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer; vec[(i * 3) + 0].iov_len = sizeof(*info); vec[(i * 3) + 1].iov_base = address; vec[(i * 3) + 1].iov_len = address_length; vec[(i * 3) + 2].iov_base = rkey_buffer; vec[(i * 3) + 2].iov_len = info->rkey_size; address_length = 0; } /* send to the remote peer */ rte_call(perf, post_vec, vec, 3 * thread_count, &req); rte_call(perf, exchange_vec, req); if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } for (i = 0; i < thread_count; i++) { free(vec[i * 3].iov_base); ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker, vec[(i * 3) + 1].iov_base); } free(vec); return UCS_OK; err_free_workers_vec: for (j = 0; j < i; j++) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } free(vec); err_rkey_release: if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } err: return status; } static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf, uint64_t features) { ucs_status_t status; unsigned i; /* pack the local endpoints data and send to the remote peer */ status = ucp_perf_test_send_local_data(perf, features); if (status != UCS_OK) { goto err; } /* receive remote peer's endpoints' data and connect to them */ status = ucp_perf_test_receive_remote_data(perf); if (status != UCS_OK) { goto err; } /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, UCS_OK); if (status != UCS_OK) { goto err_destroy_eps; } /* force wireup completion */ for (i = 0; i < perf->params.thread_count; i++) { 
        /* flush to complete wireup; a failure is logged but not fatal --
         * the status of the last failing flush is what gets returned */
        status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            /* NOTE(review): %d with an unsigned counter -- harmless for sane
             * thread counts, but %u would be exact */
            ucs_warn("ucp_worker_flush() failed on thread %d: %s",
                     i, ucs_status_string(status));
        }
    }

    return status;

err_destroy_eps:
    ucp_perf_test_destroy_eps(perf);
err:
    /* propagate the local failure to the peer before bailing out */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}

/* Barrier with the peer, then tear down all endpoints (and their rkeys). */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    ucp_perf_barrier(perf);
    ucp_perf_test_destroy_eps(perf);
}

/* Destroy every per-thread worker that was successfully created. */
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
    unsigned i;

    for (i = 0; i < perf->params.thread_count; i++) {
        if (perf->ucp.tctx[i].perf.ucp.worker != NULL) {
            ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
        }
    }
}

/* Configure a shortened warmup run: at most warmup_iter iterations (capped at
 * a tenth of the real iteration count) and no intermediate reporting. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                                const ucx_perf_params_t* params)
{
    perf->max_iter        = ucs_min(params->warmup_iter,
                                    ucs_div_round_up(params->max_iter, 10));
    perf->report_interval = ULONG_MAX;
}

/*
 * Scan all UCT components and their memory-domain resources for one that
 * offers the requested tl_name/dev_name pair; on success stores the opened MD
 * and its component in perf->uct.  (Continues on the following lines.)
 */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_component_h *uct_components;
    uct_component_attr_t component_attr;
    uct_tl_resource_desc_t *tl_resources;
    unsigned md_index, num_components;
    unsigned tl_index, num_tl_resources;
    unsigned cmpt_index;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_components(&uct_components, &num_components);
    if (status != UCS_OK) {
        goto out;
    }

    for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
        /* first query how many MD resources the component exposes ... */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
        status = uct_component_query(uct_components[cmpt_index],
                                     &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        /* ... then fetch the resource list itself (alloca: the count is
         * small and the list is only needed within this function) */
        component_attr.field_mask   = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
        component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
                                             component_attr.md_resource_count);
        status = uct_component_query(uct_components[cmpt_index],
                                     &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        for (md_index = 0; md_index < component_attr.md_resource_count;
             ++md_index) {
            /* open this MD and check whether it provides the requested
             * transport/device pair */
            status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
                                        &md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            ucs_strncpy_zero(perf->params.uct.md_name,
                             component_attr.md_resources[md_index].md_name,
                             UCT_MD_NAME_MAX);

            status = uct_md_open(uct_components[cmpt_index],
                                 component_attr.md_resources[md_index].md_name,
                                 md_config, &md);
            uct_config_release(md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            status = uct_md_query_tl_resources(md, &tl_resources,
                                               &num_tl_resources);
            if (status != UCS_OK) {
                uct_md_close(md);
                goto out_release_components_list;
            }

            for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
                if (!strcmp(perf->params.uct.tl_name,
                            tl_resources[tl_index].tl_name) &&
                    !strcmp(perf->params.uct.dev_name,
                            tl_resources[tl_index].dev_name)) {
                    /* match found: keep this MD open and hand it to perf */
                    uct_release_tl_resource_list(tl_resources);
                    perf->uct.cmpt = uct_components[cmpt_index];
                    perf->uct.md   = md;
                    status         = UCS_OK;
                    goto out_release_components_list;
                }
            }

            uct_md_close(md);
            uct_release_tl_resource_list(tl_resources);
        }
    }

    ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
              UCT_PERF_TEST_PARAMS_ARG(&perf->params));
    status = UCS_ERR_NO_DEVICE;

out_release_components_list:
    uct_release_component_list(uct_components);
out:
    return status;
}

/* RTE barrier that keeps the UCT worker progressing while waiting. */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}

/* RTE barrier that keeps the calling thread's UCP worker progressing. */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
             (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
             (void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}

/*
 * Full UCT-side setup: async context, worker, MD, iface, capability check,
 * memory allocation and endpoint wireup (continues on the following lines).
 */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .field_mask           = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
                                UCT_IFACE_PARAM_FIELD_STATS_ROOT |
                                UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                                UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode            =
                                UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* NOTE(review): the local capability-check result is immediately replaced
     * by the exchanged collective status below, so both sides learn of any
     * capability mismatch on either peer */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface,
                                              perf->uct.md);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called
     * to give a chance to finish connection for some transports (ib/ud, tcp).
     * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are
     * in progress */
    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}

/* Tear down all UCT resources in reverse order of uct_perf_setup(). */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}

/* UCP request-init hook: clear the per-request context pointer. */
static void ucp_perf_request_init(void *req)
{
    ucp_perf_request_t *request = req;

    request->context = NULL;
}

/*
 * Full UCP-side setup: context, per-thread workers and buffers, and endpoint
 * wireup (continues on the following lines).
 */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_worker_attr_t worker_attr;
    ucp_config_t *config;
    ucs_status_t status;
    unsigned i, thread_count;
    size_t message_size;

    ucp_params.field_mask   = UCP_PARAM_FIELD_FEATURES |
                              UCP_PARAM_FIELD_REQUEST_SIZE |
                              UCP_PARAM_FIELD_REQUEST_INIT;
    ucp_params.features     = 0;
    ucp_params.request_size = sizeof(ucp_perf_request_t);
    ucp_params.request_init = ucp_perf_request_init;

    if (perf->params.thread_count > 1) {
        /* when there is more than one thread, a ucp_worker would be created for
         * each.
all of them will share the same ucp_context */ ucp_params.features |= UCP_PARAM_FIELD_MT_WORKERS_SHARED; ucp_params.mt_workers_shared = 1; } status = ucp_perf_test_fill_params(&perf->params, &ucp_params); if (status != UCS_OK) { goto err; } status = ucp_config_read(NULL, NULL, &config); if (status != UCS_OK) { goto err; } status = ucp_init(&ucp_params, config, &perf->ucp.context); ucp_config_release(config); if (status != UCS_OK) { goto err; } thread_count = perf->params.thread_count; message_size = ucx_perf_get_message_size(&perf->params); status = ucp_perf_test_alloc_mem(perf); if (status != UCS_OK) { ucs_warn("ucp test failed to allocate memory"); goto err_cleanup; } perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t)); if (perf->ucp.tctx == NULL) { ucs_warn("ucp test failed to allocate memory for thread contexts"); goto err_free_mem; } worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; worker_params.thread_mode = perf->params.thread_mode; for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].tid = i; perf->ucp.tctx[i].perf = *perf; /* Doctor the src and dst buffers to make them thread specific */ perf->ucp.tctx[i].perf.send_buffer = UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size); perf->ucp.tctx[i].perf.recv_buffer = UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size); status = ucp_worker_create(perf->ucp.context, &worker_params, &perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { goto err_free_tctx_destroy_workers; } } if (perf->params.command == UCX_PERF_CMD_AM) { /* Check that requested AM header size is not larger than max supported. 
           (queried from the first worker) */
        worker_attr.field_mask = UCP_WORKER_ATTR_FIELD_MAX_AM_HEADER;

        status = ucp_worker_query(perf->ucp.tctx[0].perf.ucp.worker,
                                  &worker_attr);
        if (status != UCS_OK) {
            goto err_free_tctx_destroy_workers;
        }

        if (worker_attr.max_am_header < perf->params.ucp.am_hdr_size) {
            ucs_error("AM header size (%zu) is larger than max supported (%zu)",
                      perf->params.ucp.am_hdr_size, worker_attr.max_am_header);
            status = UCS_ERR_INVALID_PARAM;
            goto err_free_tctx_destroy_workers;
        }
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s",
                      ucs_status_string(status));
        }
        goto err_free_tctx_destroy_workers;
    }

    return UCS_OK;

err_free_tctx_destroy_workers:
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
err_free_mem:
    ucp_perf_test_free_mem(perf);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}

/* Tear down everything created by ucp_perf_setup(), in reverse order. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
    ucp_cleanup(perf->ucp.context);
}

/* Per-API dispatch table, indexed by the test's API selector. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};

static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result);

/*
 * Top-level test driver: validate parameters, set up the chosen API, run the
 * test (single- or multi-threaded) and report results, then clean up.
 * (Continues on the following lines.)
 */
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
                          ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    ucx_perf_global_init();

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api
                     != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_init(perf, params);

    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory types %s<->%s",
                  ucs_memory_type_names[params->send_mem_type],
                  ucs_memory_type_names[params->recv_mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }

    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }

    /* bind the process to the allocator's device, if any */
    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (params->thread_count == 1) {
        if (params->api == UCX_PERF_API_UCP) {
            /* mirror thread 0's UCP handles into the top-level context */
            perf->ucp.worker      = perf->ucp.tctx[0].perf.ucp.worker;
            perf->ucp.ep          = perf->ucp.tctx[0].perf.ucp.ep;
            perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
            perf->ucp.rkey        = perf->ucp.tctx[0].perf.ucp.rkey;
        }

        /* optional warmup pass, then reset counters for the measured run */
        if (params->warmup_iter > 0) {
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1, 0);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}

#if _OPENMP
/* Body of one OpenMP worker thread: allocator init, warmup, synchronized
 * measured run, result calculation.  (Continues on the next line.) */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx   = (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t*         result = &tctx->result;
    ucx_perf_context_t*        perf   = &tctx->perf;
    ucx_perf_params_t*         params = &perf->params;
    ucs_status_t status;

    /* new threads need explicit device association */
    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out;
    }

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
#pragma omp barrier
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }

    ucx_perf_calc_result(perf, result);

out:
    return status;
}

/*
 * Aggregate per-thread results: bandwidth and message rate are summed,
 * latency is averaged; moment/typical values are left 0 (undefined for
 * multi-threaded runs).  (Continues on the next line.)
 */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
    unsigned i, thread_count = perf->params.thread_count;
    /* NOTE(review): "avegare" is a typo for "average" (local name only) */
    double lat_sum_total_avegare = 0.0;
    ucx_perf_result_t agg_result;

    /* iteration/byte/time counters are identical across threads; take
     * thread 0's */
    agg_result.iters        = tctx[0].result.iters;
    agg_result.bytes        = tctx[0].result.bytes;
    agg_result.elapsed_time = tctx[0].result.elapsed_time;

    agg_result.bandwidth.total_average = 0.0;
    agg_result.bandwidth.typical       = 0.0; /* Undefined since used only for latency calculations */
    agg_result.latency.total_average   = 0.0;
    agg_result.msgrate.total_average   = 0.0;
    agg_result.msgrate.typical         = 0.0; /* Undefined since used only for latency calculations */

    /* when running with multiple threads, the moment average value is
     * undefined since we don't capture the values of the last iteration */
    agg_result.msgrate.moment_average   = 0.0;
    agg_result.bandwidth.moment_average = 0.0;
    agg_result.latency.moment_average   = 0.0;
    agg_result.latency.typical          = 0.0;

    /* in case of multiple threads, we have to aggregate the results so that the
     * final output of the result would show the performance
numbers that were * collected from all the threads. * BW and message rate values will be the sum of their values from all * the threads, while the latency value is the average latency from the * threads. */ for (i = 0; i < thread_count; i++) { agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average; agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average; lat_sum_total_avegare += tctx[i].result.latency.total_average; } agg_result.latency.total_average = lat_sum_total_avegare / thread_count; rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1); } static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */ int ti, thread_count = perf->params.thread_count; ucs_status_t* statuses; ucs_status_t status; omp_set_num_threads(thread_count); statuses = calloc(thread_count, sizeof(ucs_status_t)); if (statuses == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } #pragma omp parallel private(ti) { ti = omp_get_thread_num(); tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]); } status = UCS_OK; for (ti = 0; ti < thread_count; ti++) { if (UCS_OK != tctx[ti].status) { ucs_error("Thread %d failed to run test: %s", tctx[ti].tid, ucs_status_string(tctx[ti].status)); status = tctx[ti].status; } } ucx_perf_thread_report_aggregated_results(perf); free(statuses); out: return status; } #else static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)"); return UCS_ERR_INVALID_PARAM; } #endif /* _OPENMP */ void ucx_perf_global_init() { static ucx_perf_allocator_t host_allocator = { .mem_type = UCS_MEMORY_TYPE_HOST, .init = ucs_empty_function_return_success, .ucp_alloc = ucp_perf_test_alloc_host, .ucp_free = ucp_perf_test_free_host, .uct_alloc = uct_perf_test_alloc_host, .uct_free = 
                     uct_perf_test_free_host,
        .memcpy    = ucx_perf_test_memcpy_host,
        .memset    = memset
    };
    UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);

    /* host memory is always supported; device-memory allocators are
     * registered by the modules loaded below */
    ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;

    /* FIXME Memtype allocator modules must be loaded to global scope, otherwise
     * alloc hooks, which are using dlsym() to get pointer to original function,
     * do not work. Need to use bistro for memtype hooks to fix it. */
    UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}